diff --git a/docs/vmagent.md b/docs/vmagent.md
index dbd9b4c0b..996782f37 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -771,12 +771,13 @@ e.g. it sets `scrape_series_added` metric to zero. See [these docs](#automatical
## Stream parsing mode
-By default, `vmagent` reads the full response body from scrape target into memory, then parses it, applies [relabeling](#relabeling)
-and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works good for the majority of cases
-when the scrape target exposes small number of metrics (e.g. less than 10 thousand). But this mode may take big amounts of memory
-when the scrape target exposes big number of metrics. In this case it is recommended enabling stream parsing mode.
-When this mode is enabled, then `vmagent` reads response from scrape target in chunks, then immediately processes every chunk
-and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics.
+By default, `vmagent` parses the full response from the scrape target, applies [relabeling](#relabeling)
+and then pushes the resulting metrics to the configured `-remoteWrite.url` in one go. This mode works well for the majority of cases
+when the scrape target exposes a small number of metrics (e.g. less than 10K). But this mode may require large amounts of memory
+when the scrape target exposes a large number of metrics (for example, when `vmagent` scrapes [`kube-state-metrics`](https://github.com/kubernetes/kube-state-metrics)
+in a large Kubernetes cluster). It is recommended to enable stream parsing mode for such targets.
+When this mode is enabled, `vmagent` processes the response from the scrape target in chunks.
+This reduces memory usage when scraping targets that expose millions of metrics.
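+
+For example, stream parsing mode can be enabled for all the targets in a particular scrape job
+by setting the `stream_parse: true` option at the `scrape_config` level (the job name and the target address
+below are illustrative):
+
+```yaml
+scrape_configs:
+  - job_name: kube-state-metrics
+    stream_parse: true
+    static_configs:
+      - targets: ["kube-state-metrics:8080"]
+```
+
+It can also be enabled for all the scrape targets at once by passing the `-promscrape.streamParse` command-line flag to `vmagent`.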
Stream parsing mode is automatically enabled for scrape targets returning response bodies with sizes bigger than
the `-promscrape.minResponseSizeForStreamParse` command-line flag value. Additionally,
diff --git a/go.mod b/go.mod
index 5b1f75420..f7a48e8de 100644
--- a/go.mod
+++ b/go.mod
@@ -8,10 +8,6 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/VictoriaMetrics/fastcache v1.12.2
-
- // Do not use the original github.com/valyala/fasthttp because of issues
- // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
- github.com/VictoriaMetrics/fasthttp v1.2.0
github.com/VictoriaMetrics/metrics v1.31.0
github.com/VictoriaMetrics/metricsql v0.70.0
github.com/aws/aws-sdk-go-v2 v1.24.1
@@ -34,7 +30,7 @@ require (
github.com/valyala/gozstd v1.20.1
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
- golang.org/x/net v0.20.0
+ golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0
golang.org/x/sys v0.16.0
google.golang.org/api v0.159.0
diff --git a/go.sum b/go.sum
index a10311dfb..ee36721f8 100644
--- a/go.sum
+++ b/go.sum
@@ -63,8 +63,6 @@ github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
-github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2blTJwfyU9I=
-github.com/VictoriaMetrics/fasthttp v1.2.0/go.mod h1:zv5YSmasAoSyv8sBVexfArzFDIGGTN4TfCKAtAw7IfE=
github.com/VictoriaMetrics/metrics v1.24.0/go.mod h1:eFT25kvsTidQFHb6U0oa0rTrDRdz4xTYjpL8+UPohys=
github.com/VictoriaMetrics/metrics v1.31.0 h1:X6+nBvAP0UB+GjR0Ht9hhQ3pjL1AN4b8dt9zFfzTsUo=
github.com/VictoriaMetrics/metrics v1.31.0/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8=
diff --git a/lib/promauth/config.go b/lib/promauth/config.go
index a740295f5..6b95a8017 100644
--- a/lib/promauth/config.go
+++ b/lib/promauth/config.go
@@ -14,7 +14,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
- "github.com/VictoriaMetrics/fasthttp"
"github.com/cespare/xxhash/v2"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
@@ -343,24 +342,6 @@ func (ac *Config) SetHeaders(req *http.Request, setAuthHeader bool) error {
return nil
}
-// SetFasthttpHeaders sets the configured ac headers to req.
-func (ac *Config) SetFasthttpHeaders(req *fasthttp.Request, setAuthHeader bool) error {
- reqHeaders := &req.Header
- for _, h := range ac.headers {
- reqHeaders.Set(h.key, h.value)
- }
- if setAuthHeader {
- ah, err := ac.GetAuthHeader()
- if err != nil {
- return fmt.Errorf("failed to obtain Authorization request header: %w", err)
- }
- if ah != "" {
- reqHeaders.Set("Authorization", ah)
- }
- }
- return nil
-}
-
// GetAuthHeader returns optional `Authorization: ...` http header.
func (ac *Config) GetAuthHeader() (string, error) {
if f := ac.getAuthHeaderCached; f != nil {
diff --git a/lib/promauth/config_test.go b/lib/promauth/config_test.go
index f171c5b34..861ab3265 100644
--- a/lib/promauth/config_test.go
+++ b/lib/promauth/config_test.go
@@ -5,7 +5,6 @@ import (
"net/http/httptest"
"testing"
- "github.com/VictoriaMetrics/fasthttp"
"gopkg.in/yaml.v2"
)
@@ -307,12 +306,6 @@ func TestConfigGetAuthHeaderFailure(t *testing.T) {
t.Fatalf("expecting non-nil error from SetHeaders()")
}
- // Verify that cfg.SetFasthttpHeaders() returns error
- var fhreq fasthttp.Request
- if err := cfg.SetFasthttpHeaders(&fhreq, true); err == nil {
- t.Fatalf("expecting non-nil error from SetFasthttpHeaders()")
- }
-
// Verify that the tls cert cannot be loaded properly if it exists
if f := cfg.getTLSCertCached; f != nil {
cert, err := f(nil)
@@ -421,16 +414,6 @@ func TestConfigGetAuthHeaderSuccess(t *testing.T) {
if ah != ahExpected {
t.Fatalf("unexpected auth header from net/http request; got %q; want %q", ah, ahExpected)
}
-
- // Make sure that cfg.SetFasthttpHeaders() properly set Authorization header
- var fhreq fasthttp.Request
- if err := cfg.SetFasthttpHeaders(&fhreq, true); err != nil {
- t.Fatalf("unexpected error in SetFasthttpHeaders(): %s", err)
- }
- ahb := fhreq.Header.Peek("Authorization")
- if string(ahb) != ahExpected {
- t.Fatalf("unexpected auth header from fasthttp request; got %q; want %q", ahb, ahExpected)
- }
}
// Zero config
@@ -578,16 +561,6 @@ func TestConfigHeaders(t *testing.T) {
t.Fatalf("unexpected value for net/http header %q; got %q; want %q", h.key, v, h.value)
}
}
- var fhreq fasthttp.Request
- if err := c.SetFasthttpHeaders(&fhreq, false); err != nil {
- t.Fatalf("unexpected error in SetFasthttpHeaders(): %s", err)
- }
- for _, h := range headersParsed {
- v := fhreq.Header.Peek(h.key)
- if string(v) != h.value {
- t.Fatalf("unexpected value for fasthttp header %q; got %q; want %q", h.key, v, h.value)
- }
- }
}
f(nil, "")
f([]string{"foo: bar"}, "foo: bar\r\n")
diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go
index dca170815..48a23dc05 100644
--- a/lib/promscrape/client.go
+++ b/lib/promscrape/client.go
@@ -13,10 +13,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
- "github.com/VictoriaMetrics/fasthttp"
"github.com/VictoriaMetrics/metrics"
)
@@ -37,57 +33,16 @@ var (
)
type client struct {
- // hc is the default client optimized for common case of scraping targets with moderate number of metrics.
- hc *fasthttp.HostClient
-
- // sc (aka `stream client`) is used instead of hc if ScrapeWork.StreamParse is set.
- // It may be useful for scraping targets with millions of metrics per target.
- sc *http.Client
-
+ c *http.Client
ctx context.Context
scrapeURL string
scrapeTimeoutSecondsStr string
- hostPort string
- requestURI string
setHeaders func(req *http.Request) error
setProxyHeaders func(req *http.Request) error
- setFasthttpHeaders func(req *fasthttp.Request) error
- setFasthttpProxyHeaders func(req *fasthttp.Request) error
- denyRedirects bool
- disableCompression bool
- disableKeepAlive bool
}
-func addMissingPort(addr string, isTLS bool) string {
- if strings.Contains(addr, ":") {
- return addr
- }
- if isTLS {
- return concatTwoStrings(addr, ":443")
- }
- return concatTwoStrings(addr, ":80")
-}
-
-func concatTwoStrings(x, y string) string {
- bb := bbPool.Get()
- b := bb.B[:0]
- b = append(b, x...)
- b = append(b, y...)
- s := bytesutil.InternBytes(b)
- bb.B = b
- bbPool.Put(bb)
- return s
-}
-
-const scrapeUserAgent = "vm_promscrape"
-
func newClient(ctx context.Context, sw *ScrapeWork) (*client, error) {
- var u fasthttp.URI
- u.Update(sw.ScrapeURL)
- hostPort := string(u.Host())
- dialAddr := hostPort
- requestURI := string(u.RequestURI())
- isTLS := string(u.Scheme()) == "https"
+ isTLS := strings.HasPrefix(sw.ScrapeURL, "https://")
var tlsCfg *tls.Config
if isTLS {
var err error
@@ -96,59 +51,31 @@ func newClient(ctx context.Context, sw *ScrapeWork) (*client, error) {
return nil, fmt.Errorf("cannot initialize tls config: %w", err)
}
}
- setProxyHeaders := func(req *http.Request) error { return nil }
- setFasthttpProxyHeaders := func(req *fasthttp.Request) error { return nil }
+ setHeaders := func(req *http.Request) error {
+ return sw.AuthConfig.SetHeaders(req, true)
+ }
+ setProxyHeaders := func(req *http.Request) error {
+ return nil
+ }
proxyURL := sw.ProxyURL
if !isTLS && proxyURL.IsHTTPOrHTTPS() {
- // Send full sw.ScrapeURL in requests to a proxy host for non-TLS scrape targets
- // like net/http package from Go does.
- // See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
pu := proxyURL.GetURL()
- dialAddr = pu.Host
- requestURI = sw.ScrapeURL
- isTLS = pu.Scheme == "https"
- if isTLS {
+ if pu.Scheme == "https" {
var err error
tlsCfg, err = sw.ProxyAuthConfig.NewTLSConfig()
if err != nil {
return nil, fmt.Errorf("cannot initialize proxy tls config: %w", err)
}
}
- proxyURLOrig := proxyURL
setProxyHeaders = func(req *http.Request) error {
- return proxyURLOrig.SetHeaders(sw.ProxyAuthConfig, req)
+ return proxyURL.SetHeaders(sw.ProxyAuthConfig, req)
}
- setFasthttpProxyHeaders = func(req *fasthttp.Request) error {
- return proxyURLOrig.SetFasthttpHeaders(sw.ProxyAuthConfig, req)
- }
- proxyURL = &proxy.URL{}
}
- hostPort = addMissingPort(hostPort, isTLS)
- dialAddr = addMissingPort(dialAddr, isTLS)
- dialFunc, err := newStatDialFunc(proxyURL, sw.ProxyAuthConfig)
- if err != nil {
- return nil, fmt.Errorf("cannot create dial func: %w", err)
- }
- hc := &fasthttp.HostClient{
- Addr: dialAddr,
- // Name used in User-Agent request header
- Name: scrapeUserAgent,
- Dial: dialFunc,
- IsTLS: isTLS,
- TLSConfig: tlsCfg,
- MaxIdleConnDuration: 2 * sw.ScrapeInterval,
- ReadTimeout: sw.ScrapeTimeout,
- WriteTimeout: 10 * time.Second,
- MaxResponseBodySize: maxScrapeSize.IntN(),
- MaxIdempotentRequestAttempts: 1,
- ReadBufferSize: maxResponseHeadersSize.IntN(),
- }
- var sc *http.Client
var proxyURLFunc func(*http.Request) (*url.URL, error)
if pu := sw.ProxyURL.GetURL(); pu != nil {
proxyURLFunc = http.ProxyURL(pu)
}
- sc = &http.Client{
+ hc := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
Proxy: proxyURLFunc,
@@ -163,41 +90,29 @@ func newClient(ctx context.Context, sw *ScrapeWork) (*client, error) {
Timeout: sw.ScrapeTimeout,
}
if sw.DenyRedirects {
- sc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
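+			// Do not follow the redirect - return the redirect response as-is to the caller.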
return http.ErrUseLastResponse
}
}
c := &client{
- hc: hc,
+ c: hc,
ctx: ctx,
- sc: sc,
scrapeURL: sw.ScrapeURL,
scrapeTimeoutSecondsStr: fmt.Sprintf("%.3f", sw.ScrapeTimeout.Seconds()),
- hostPort: hostPort,
- requestURI: requestURI,
- setHeaders: func(req *http.Request) error {
- return sw.AuthConfig.SetHeaders(req, true)
- },
- setProxyHeaders: setProxyHeaders,
- setFasthttpHeaders: func(req *fasthttp.Request) error {
- return sw.AuthConfig.SetFasthttpHeaders(req, true)
- },
- setFasthttpProxyHeaders: setFasthttpProxyHeaders,
- denyRedirects: sw.DenyRedirects,
- disableCompression: sw.DisableCompression,
- disableKeepAlive: sw.DisableKeepAlive,
+ setHeaders: setHeaders,
+ setProxyHeaders: setProxyHeaders,
}
return c, nil
}
-func (c *client) GetStreamReader() (*streamReader, error) {
- deadline := time.Now().Add(c.sc.Timeout)
+func (c *client) ReadData(dst *bytesutil.ByteBuffer) error {
+ deadline := time.Now().Add(c.c.Timeout)
ctx, cancel := context.WithDeadline(c.ctx, deadline)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.scrapeURL, nil)
if err != nil {
cancel()
- return nil, fmt.Errorf("cannot create request for %q: %w", c.scrapeURL, err)
+ return fmt.Errorf("cannot create request for %q: %w", c.scrapeURL, err)
}
// The following `Accept` header has been copied from Prometheus sources.
// See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 .
@@ -208,236 +123,59 @@ func (c *client) GetStreamReader() (*streamReader, error) {
// Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
- req.Header.Set("User-Agent", scrapeUserAgent)
+ req.Header.Set("User-Agent", "vm_promscrape")
if err := c.setHeaders(req); err != nil {
cancel()
- return nil, fmt.Errorf("failed to set request headers for %q: %w", c.scrapeURL, err)
+ return fmt.Errorf("failed to set request headers for %q: %w", c.scrapeURL, err)
}
if err := c.setProxyHeaders(req); err != nil {
cancel()
- return nil, fmt.Errorf("failed to set proxy request headers for %q: %w", c.scrapeURL, err)
+ return fmt.Errorf("failed to set proxy request headers for %q: %w", c.scrapeURL, err)
}
scrapeRequests.Inc()
- resp, err := c.sc.Do(req)
+ resp, err := c.c.Do(req)
if err != nil {
cancel()
- return nil, fmt.Errorf("cannot scrape %q: %w", c.scrapeURL, err)
+ if ue, ok := err.(*url.Error); ok && ue.Timeout() {
+ scrapesTimedout.Inc()
+ }
+ return fmt.Errorf("cannot perform request to %q: %w", c.scrapeURL, err)
}
if resp.StatusCode != http.StatusOK {
metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, resp.StatusCode)).Inc()
respBody, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
cancel()
- return nil, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",
+ return fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",
c.scrapeURL, resp.StatusCode, http.StatusOK, respBody)
}
scrapesOK.Inc()
- sr := &streamReader{
- r: resp.Body,
- cancel: cancel,
- scrapeURL: c.scrapeURL,
- maxBodySize: int64(c.hc.MaxResponseBodySize),
- }
- return sr, nil
-}
-// checks fasthttp status code for redirect as standard http/client does.
-func isStatusRedirect(statusCode int) bool {
- switch statusCode {
- case 301, 302, 303, 307, 308:
- return true
+ // Read the data from resp.Body
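+	// The read is capped at maxScrapeSize.N bytes - reaching this limit
+	// is reported as an error below instead of silently truncating the response.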
+ r := &io.LimitedReader{
+ R: resp.Body,
+ N: maxScrapeSize.N,
}
- return false
-}
-
-func (c *client) ReadData(dst []byte) ([]byte, error) {
- deadline := time.Now().Add(c.hc.ReadTimeout)
- req := fasthttp.AcquireRequest()
- req.SetRequestURI(c.requestURI)
- req.Header.SetHost(c.hostPort)
- // The following `Accept` header has been copied from Prometheus sources.
- // See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 .
- // This is needed as a workaround for scraping stupid Java-based servers such as Spring Boot.
- // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/608 for details.
- // Do not bloat the `Accept` header with OpenMetrics shit, since it looks like dead standard now.
- req.Header.Set("Accept", "text/plain;version=0.0.4;q=1,*/*;q=0.1")
- // Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
- // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
- req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
- if err := c.setFasthttpHeaders(req); err != nil {
- return nil, fmt.Errorf("failed to set request headers for %q: %w", c.scrapeURL, err)
- }
- if err := c.setFasthttpProxyHeaders(req); err != nil {
- return nil, fmt.Errorf("failed to set proxy request headers for %q: %w", c.scrapeURL, err)
- }
- if !*disableCompression && !c.disableCompression {
- req.Header.Set("Accept-Encoding", "gzip")
- }
- if *disableKeepAlive || c.disableKeepAlive {
- req.SetConnectionClose()
- }
- resp := fasthttp.AcquireResponse()
- swapResponseBodies := len(dst) == 0
- if swapResponseBodies {
- // An optimization: write response directly to dst.
- // This should reduce memory usage when scraping big targets.
- dst = resp.SwapBody(dst)
- }
-
- ctx, cancel := context.WithDeadline(c.ctx, deadline)
- defer cancel()
-
- err := doRequestWithPossibleRetry(ctx, c.hc, req, resp)
- statusCode := resp.StatusCode()
- redirectsCount := 0
- for err == nil && isStatusRedirect(statusCode) {
- if redirectsCount > 5 {
- err = fmt.Errorf("too many redirects")
- break
- }
- if c.denyRedirects {
- err = fmt.Errorf("cannot follow redirects if `follow_redirects: false` is set")
- break
- }
- // It is expected that the redirect is made on the same host.
- // Otherwise it won't work.
- location := resp.Header.Peek("Location")
- if len(location) == 0 {
- err = fmt.Errorf("missing Location header")
- break
- }
- req.URI().UpdateBytes(location)
- err = doRequestWithPossibleRetry(ctx, c.hc, req, resp)
- statusCode = resp.StatusCode()
- redirectsCount++
- }
- if swapResponseBodies {
- dst = resp.SwapBody(dst)
- }
- fasthttp.ReleaseRequest(req)
+ _, err = dst.ReadFrom(r)
+ _ = resp.Body.Close()
+ cancel()
if err != nil {
- fasthttp.ReleaseResponse(resp)
- if err == fasthttp.ErrTimeout {
+ if ue, ok := err.(*url.Error); ok && ue.Timeout() {
scrapesTimedout.Inc()
- return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err)
}
- if err == fasthttp.ErrBodyTooLarge {
- maxScrapeSizeExceeded.Inc()
- return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
- "either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, maxScrapeSize.N)
- }
- return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err)
+ return fmt.Errorf("cannot read data from %s: %w", c.scrapeURL, err)
}
- if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
- var err error
- if swapResponseBodies {
- zb := gunzipBufPool.Get()
- zb.B, err = fasthttp.AppendGunzipBytes(zb.B[:0], dst)
- dst = append(dst[:0], zb.B...)
- gunzipBufPool.Put(zb)
- } else {
- dst, err = fasthttp.AppendGunzipBytes(dst, resp.Body())
- }
- if err != nil {
- fasthttp.ReleaseResponse(resp)
- scrapesGunzipFailed.Inc()
- return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err)
- }
- scrapesGunzipped.Inc()
- } else if !swapResponseBodies {
- dst = append(dst, resp.Body()...)
- }
- fasthttp.ReleaseResponse(resp)
- if len(dst) > c.hc.MaxResponseBodySize {
+ if int64(len(dst.B)) >= maxScrapeSize.N {
maxScrapeSizeExceeded.Inc()
- return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d (the actual response size is %d bytes); "+
- "either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, maxScrapeSize.N, len(dst))
+ return fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
+ "either reduce the response size for the target or increase -promscrape.maxScrapeSize command-line flag value", c.scrapeURL, maxScrapeSize.N)
}
- if statusCode != fasthttp.StatusOK {
- metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, statusCode)).Inc()
- return dst, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",
- c.scrapeURL, statusCode, fasthttp.StatusOK, dst)
- }
- scrapesOK.Inc()
- return dst, nil
+ return nil
}
-var gunzipBufPool bytesutil.ByteBufferPool
-
var (
maxScrapeSizeExceeded = metrics.NewCounter(`vm_promscrape_max_scrape_size_exceeded_errors_total`)
scrapesTimedout = metrics.NewCounter(`vm_promscrape_scrapes_timed_out_total`)
scrapesOK = metrics.NewCounter(`vm_promscrape_scrapes_total{status_code="200"}`)
- scrapesGunzipped = metrics.NewCounter(`vm_promscrape_scrapes_gunziped_total`)
- scrapesGunzipFailed = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`)
scrapeRequests = metrics.NewCounter(`vm_promscrape_scrape_requests_total`)
- scrapeRetries = metrics.NewCounter(`vm_promscrape_scrape_retries_total`)
)
-
-func doRequestWithPossibleRetry(ctx context.Context, hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response) error {
- scrapeRequests.Inc()
-
- var reqErr error
- // Return true if the request execution is completed and retry is not required
- attempt := func() bool {
- // Use DoCtx instead of Do in order to support context cancellation
- reqErr = hc.DoCtx(ctx, req, resp)
- if reqErr == nil {
- statusCode := resp.StatusCode()
- if statusCode != fasthttp.StatusTooManyRequests {
- return true
- }
- } else if reqErr != fasthttp.ErrConnectionClosed && !strings.Contains(reqErr.Error(), "broken pipe") {
- return true
- }
- return false
- }
-
- if attempt() {
- return reqErr
- }
-
- // The first attempt was unsuccessful. Use exponential backoff for further attempts.
- // Perform the second attempt immediately after the first attempt - this should help
- // in cases when the remote side closes the keep-alive connection before the first attempt.
- // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3293
- sleepTime := time.Second
- // It is expected that the deadline is already set to ctx, so the loop below
- // should eventually finish if all the attempt() calls are unsuccessful.
- for {
- scrapeRetries.Inc()
- if attempt() {
- return reqErr
- }
- sleepTime += sleepTime
- if !discoveryutils.SleepCtx(ctx, sleepTime) {
- return reqErr
- }
- }
-}
-
-type streamReader struct {
- r io.ReadCloser
- cancel context.CancelFunc
- bytesRead int64
- scrapeURL string
- maxBodySize int64
-}
-
-func (sr *streamReader) Read(p []byte) (int, error) {
- n, err := sr.r.Read(p)
- sr.bytesRead += int64(n)
- if err == nil && sr.bytesRead > sr.maxBodySize {
- maxScrapeSizeExceeded.Inc()
- err = fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
- "either reduce the response size for the target or increase -promscrape.maxScrapeSize", sr.scrapeURL, sr.maxBodySize)
- }
- return n, err
-}
-
-func (sr *streamReader) MustClose() {
- sr.cancel()
- if err := sr.r.Close(); err != nil {
- logger.Errorf("cannot close reader: %s", err)
- }
-}
diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go
index 648051b21..ad6319203 100644
--- a/lib/promscrape/scraper.go
+++ b/lib/promscrape/scraper.go
@@ -455,7 +455,6 @@ func newScraper(sw *ScrapeWork, group string, pushData func(at *auth.Token, wr *
sc.sw.Config = sw
sc.sw.ScrapeGroup = group
sc.sw.ReadData = c.ReadData
- sc.sw.GetStreamReader = c.GetStreamReader
sc.sw.PushData = pushData
return sc, nil
}
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 2bb3846c1..c910745e9 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -4,7 +4,6 @@ import (
"bytes"
"flag"
"fmt"
- "io"
"math"
"math/bits"
"strings"
@@ -17,7 +16,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/leveledbytebufferpool"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@@ -186,11 +184,8 @@ type scrapeWork struct {
// Config for the scrape.
Config *ScrapeWork
- // ReadData is called for reading the data.
- ReadData func(dst []byte) ([]byte, error)
-
- // GetStreamReader is called if Config.StreamParse is set.
- GetStreamReader func() (*streamReader, error)
+ // ReadData is called for reading the scrape response data into dst.
+ ReadData func(dst *bytesutil.ByteBuffer) error
// PushData is called for pushing collected data.
PushData func(at *auth.Token, wr *prompbmarshal.WriteRequest)
@@ -400,7 +395,10 @@ var (
pushDataDuration = metrics.NewHistogram("vm_promscrape_push_data_duration_seconds")
)
-func (sw *scrapeWork) mustSwitchToStreamParseMode(responseSize int) bool {
+func (sw *scrapeWork) needStreamParseMode(responseSize int) bool {
+ if *streamParse || sw.Config.StreamParse {
+ return true
+ }
if minResponseSizeForStreamParse.N <= 0 {
return false
}
@@ -409,59 +407,61 @@ func (sw *scrapeWork) mustSwitchToStreamParseMode(responseSize int) bool {
// getTargetResponse() fetches response from sw target in the same way as when scraping the target.
func (sw *scrapeWork) getTargetResponse() ([]byte, error) {
- // use stream reader when stream mode enabled
- if *streamParse || sw.Config.StreamParse || sw.mustSwitchToStreamParseMode(sw.prevBodyLen) {
- // Read the response in stream mode.
- sr, err := sw.GetStreamReader()
- if err != nil {
- return nil, err
- }
- data, err := io.ReadAll(sr)
- sr.MustClose()
- return data, err
+ var bb bytesutil.ByteBuffer
+ if err := sw.ReadData(&bb); err != nil {
+ return nil, err
}
- // Read the response in usual mode.
- return sw.ReadData(nil)
+ return bb.B, nil
}
func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error {
- if *streamParse || sw.Config.StreamParse || sw.mustSwitchToStreamParseMode(sw.prevBodyLen) {
- // Read data from scrape targets in streaming manner.
- // This case is optimized for targets exposing more than ten thousand of metrics per target.
- return sw.scrapeStream(scrapeTimestamp, realTimestamp)
+ body := leveledbytebufferpool.Get(sw.prevBodyLen)
+
+ // Read the scrape response into body.
+	// It is OK to do this even in stream parsing mode, since most of the RAM
+	// is occupied during parsing of the read response body below.
+	// This also allows measuring the real scrape duration, which doesn't include
+	// the time needed for processing the read response.
+ err := sw.ReadData(body)
+
+ // Measure scrape duration.
+ endTimestamp := time.Now().UnixNano() / 1e6
+ scrapeDurationSeconds := float64(endTimestamp-realTimestamp) / 1e3
+ scrapeDuration.Update(scrapeDurationSeconds)
+ scrapeResponseSize.Update(float64(len(body.B)))
+
+	// The code below is CPU-bound, while it may allocate large amounts of memory.
+	// That's why it is a good idea to limit the number of concurrent goroutines
+	// that may execute this code, in order to limit memory usage under high load
+	// without sacrificing performance.
+ processScrapedDataConcurrencyLimitCh <- struct{}{}
+
+ if err == nil && sw.needStreamParseMode(len(body.B)) {
+		// Process the response body from the scrape target in a streaming manner.
+		// This case is optimized for targets exposing more than ten thousand metrics per target,
+		// such as kube-state-metrics.
+ err = sw.processDataInStreamMode(scrapeTimestamp, realTimestamp, body, scrapeDurationSeconds)
+ } else {
+		// Process the response body from the scrape target at once.
+		// This case should work more efficiently than stream parsing mode for the common case when the scrape target exposes
+		// up to a few thousand metrics.
+ err = sw.processDataOneShot(scrapeTimestamp, realTimestamp, body.B, scrapeDurationSeconds, err)
}
- // Common case: read all the data from scrape target to memory (body) and then process it.
- // This case should work more optimally than stream parse code for common case when scrape target exposes
- // up to a few thousand metrics.
- body := leveledbytebufferpool.Get(sw.prevBodyLen)
- var err error
- body.B, err = sw.ReadData(body.B[:0])
- releaseBody, err := sw.processScrapedData(scrapeTimestamp, realTimestamp, body, err)
- if releaseBody {
- leveledbytebufferpool.Put(body)
- }
+ <-processScrapedDataConcurrencyLimitCh
+
+ leveledbytebufferpool.Put(body)
+
return err
}
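+
+// processScrapedDataConcurrencyLimitCh limits the number of goroutines that may
+// concurrently parse and relabel scraped data to the number of available CPU cores.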
var processScrapedDataConcurrencyLimitCh = make(chan struct{}, cgroup.AvailableCPUs())
-func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, body *bytesutil.ByteBuffer, err error) (bool, error) {
- // This function is CPU-bound, while it may allocate big amounts of memory.
- // That's why it is a good idea to limit the number of concurrent calls to this function
- // in order to limit memory usage under high load without sacrificing the performance.
- processScrapedDataConcurrencyLimitCh <- struct{}{}
- defer func() {
- <-processScrapedDataConcurrencyLimitCh
- }()
- endTimestamp := time.Now().UnixNano() / 1e6
- duration := float64(endTimestamp-realTimestamp) / 1e3
- scrapeDuration.Update(duration)
- scrapeResponseSize.Update(float64(len(body.B)))
+func (sw *scrapeWork) processDataOneShot(scrapeTimestamp, realTimestamp int64, body []byte, scrapeDurationSeconds float64, err error) error {
up := 1
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
lastScrape := sw.loadLastScrape()
- bodyString := bytesutil.ToUnsafeString(body.B)
+ bodyString := bytesutil.ToUnsafeString(body)
areIdenticalSeries := sw.areIdenticalSeries(lastScrape, bodyString)
if err != nil {
up = 0
@@ -499,7 +499,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
}
am := &autoMetrics{
up: up,
- scrapeDurationSeconds: duration,
+ scrapeDurationSeconds: scrapeDurationSeconds,
samplesScraped: samplesScraped,
samplesPostRelabeling: samplesPostRelabeling,
seriesAdded: seriesAdded,
@@ -510,115 +510,59 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
sw.prevLabelsLen = len(wc.labels)
sw.prevBodyLen = len(bodyString)
wc.reset()
- mustSwitchToStreamParse := sw.mustSwitchToStreamParseMode(len(bodyString))
- if !mustSwitchToStreamParse {
- // Return wc to the pool if the parsed response size was smaller than -promscrape.minResponseSizeForStreamParse
- // This should reduce memory usage when scraping targets with big responses.
- writeRequestCtxPool.Put(wc)
- }
+ writeRequestCtxPool.Put(wc)
// body must be released only after wc is released, since wc refers to body.
if !areIdenticalSeries {
// Send stale markers for disappeared metrics with the real scrape timestamp
// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
- sw.storeLastScrape(body.B)
+ sw.storeLastScrape(body)
}
sw.finalizeLastScrape()
- tsmGlobal.Update(sw, up == 1, realTimestamp, int64(duration*1000), samplesScraped, err)
- return !mustSwitchToStreamParse, err
+ tsmGlobal.Update(sw, up == 1, realTimestamp, int64(scrapeDurationSeconds*1000), samplesScraped, err)
+ return err
}
-func (sw *scrapeWork) pushData(at *auth.Token, wr *prompbmarshal.WriteRequest) {
- startTime := time.Now()
- sw.PushData(at, wr)
- pushDataDuration.UpdateDuration(startTime)
-}
-
-type streamBodyReader struct {
- body []byte
- bodyLen int
- readOffset int
-}
-
-func (sbr *streamBodyReader) Init(sr *streamReader) error {
- sbr.body = nil
- sbr.bodyLen = 0
- sbr.readOffset = 0
- // Read the whole response body in memory before parsing it in stream mode.
- // This minimizes the time needed for reading response body from scrape target.
- startTime := fasttime.UnixTimestamp()
- body, err := io.ReadAll(sr)
- if err != nil {
- d := fasttime.UnixTimestamp() - startTime
- return fmt.Errorf("cannot read stream body in %d seconds: %w", d, err)
- }
- sbr.body = body
- sbr.bodyLen = len(body)
- return nil
-}
-
-func (sbr *streamBodyReader) Read(b []byte) (int, error) {
- if sbr.readOffset >= len(sbr.body) {
- return 0, io.EOF
- }
- n := copy(b, sbr.body[sbr.readOffset:])
- sbr.readOffset += n
- return n, nil
-}
-
-func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
+func (sw *scrapeWork) processDataInStreamMode(scrapeTimestamp, realTimestamp int64, body *bytesutil.ByteBuffer, scrapeDurationSeconds float64) error {
samplesScraped := 0
samplesPostRelabeling := 0
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
- // Do not pool sbr and do not pre-allocate sbr.body in order to reduce memory usage when scraping big responses.
- var sbr streamBodyReader
lastScrape := sw.loadLastScrape()
- bodyString := ""
- areIdenticalSeries := true
+ bodyString := bytesutil.ToUnsafeString(body.B)
+ areIdenticalSeries := sw.areIdenticalSeries(lastScrape, bodyString)
samplesDropped := 0
- sr, err := sw.GetStreamReader()
- if err != nil {
- err = fmt.Errorf("cannot read data: %w", err)
- } else {
- var mu sync.Mutex
- err = sbr.Init(sr)
- if err == nil {
- bodyString = bytesutil.ToUnsafeString(sbr.body)
- areIdenticalSeries = sw.areIdenticalSeries(lastScrape, bodyString)
- err = stream.Parse(&sbr, scrapeTimestamp, false, false, func(rows []parser.Row) error {
- mu.Lock()
- defer mu.Unlock()
- samplesScraped += len(rows)
- for i := range rows {
- sw.addRowToTimeseries(wc, &rows[i], scrapeTimestamp, true)
- }
- samplesPostRelabeling += len(wc.writeRequest.Timeseries)
- if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
- wc.resetNoRows()
- scrapesSkippedBySampleLimit.Inc()
- return fmt.Errorf("the response from %q exceeds sample_limit=%d; "+
- "either reduce the sample count for the target or increase sample_limit", sw.Config.ScrapeURL, sw.Config.SampleLimit)
- }
- if sw.seriesLimitExceeded || !areIdenticalSeries {
- samplesDropped += sw.applySeriesLimit(wc)
- }
- // Push the collected rows to sw before returning from the callback, since they cannot be held
- // after returning from the callback - this will result in data race.
- // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247
- sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
- wc.resetNoRows()
- return nil
- }, sw.logError)
+
+ r := body.NewReader()
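+	// stream.Parse may invoke the callback below from concurrently running goroutines,
+	// so the shared state updated in the callback is protected by mu.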
+ var mu sync.Mutex
+ err := stream.Parse(r, scrapeTimestamp, false, false, func(rows []parser.Row) error {
+ mu.Lock()
+ defer mu.Unlock()
+
+ samplesScraped += len(rows)
+ for i := range rows {
+ sw.addRowToTimeseries(wc, &rows[i], scrapeTimestamp, true)
}
- sr.MustClose()
- }
+ samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+ if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
+ wc.resetNoRows()
+ scrapesSkippedBySampleLimit.Inc()
+ return fmt.Errorf("the response from %q exceeds sample_limit=%d; "+
+ "either reduce the sample count for the target or increase sample_limit", sw.Config.ScrapeURL, sw.Config.SampleLimit)
+ }
+ if sw.seriesLimitExceeded || !areIdenticalSeries {
+ samplesDropped += sw.applySeriesLimit(wc)
+ }
+
+ // Push the collected rows to sw before returning from the callback, since they cannot be held
+	// after returning from the callback - that would result in a data race.
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247
+ sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
+ wc.resetNoRows()
+ return nil
+ }, sw.logError)
scrapedSamples.Update(float64(samplesScraped))
- endTimestamp := time.Now().UnixNano() / 1e6
- duration := float64(endTimestamp-realTimestamp) / 1e3
- scrapeDuration.Update(duration)
- scrapeResponseSize.Update(float64(sbr.bodyLen))
up := 1
if err != nil {
// Mark the scrape as failed even if it already read and pushed some samples
@@ -635,7 +579,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
}
am := &autoMetrics{
up: up,
- scrapeDurationSeconds: duration,
+ scrapeDurationSeconds: scrapeDurationSeconds,
samplesScraped: samplesScraped,
samplesPostRelabeling: samplesPostRelabeling,
seriesAdded: seriesAdded,
@@ -644,22 +588,28 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
sw.addAutoMetrics(am, wc, scrapeTimestamp)
sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
sw.prevLabelsLen = len(wc.labels)
- sw.prevBodyLen = sbr.bodyLen
+ sw.prevBodyLen = len(bodyString)
wc.reset()
writeRequestCtxPool.Put(wc)
if !areIdenticalSeries {
// Send stale markers for disappeared metrics with the real scrape timestamp
// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
- sw.storeLastScrape(sbr.body)
+ sw.storeLastScrape(body.B)
}
sw.finalizeLastScrape()
- tsmGlobal.Update(sw, up == 1, realTimestamp, int64(duration*1000), samplesScraped, err)
+ tsmGlobal.Update(sw, up == 1, realTimestamp, int64(scrapeDurationSeconds*1000), samplesScraped, err)
// Do not track active series in streaming mode, since this may need too big amounts of memory
// when the target exports too big number of metrics.
return err
}
+func (sw *scrapeWork) pushData(at *auth.Token, wr *prompbmarshal.WriteRequest) {
+ startTime := time.Now()
+ sw.PushData(at, wr)
+ pushDataDuration.UpdateDuration(startTime)
+}
+
func (sw *scrapeWork) areIdenticalSeries(prevData, currData string) bool {
if sw.Config.NoStaleMarkers && sw.Config.SeriesLimit <= 0 {
// Do not spend CPU time on tracking the changes in series if stale markers are disabled.
diff --git a/lib/promscrape/scrapework_test.go b/lib/promscrape/scrapework_test.go
index 11c31473a..af874380e 100644
--- a/lib/promscrape/scrapework_test.go
+++ b/lib/promscrape/scrapework_test.go
@@ -7,6 +7,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
@@ -89,9 +90,9 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
}
readDataCalls := 0
- sw.ReadData = func(dst []byte) ([]byte, error) {
+ sw.ReadData = func(dst *bytesutil.ByteBuffer) error {
readDataCalls++
- return dst, fmt.Errorf("error when reading data")
+ return fmt.Errorf("error when reading data")
}
pushDataCalls := 0
@@ -130,10 +131,10 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
sw.Config = cfg
readDataCalls := 0
- sw.ReadData = func(dst []byte) ([]byte, error) {
+ sw.ReadData = func(dst *bytesutil.ByteBuffer) error {
readDataCalls++
- dst = append(dst, data...)
- return dst, nil
+ dst.B = append(dst.B, data...)
+ return nil
}
pushDataCalls := 0
diff --git a/lib/promscrape/scrapework_timing_test.go b/lib/promscrape/scrapework_timing_test.go
index b488a11de..2902b3e36 100644
--- a/lib/promscrape/scrapework_timing_test.go
+++ b/lib/promscrape/scrapework_timing_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
@@ -73,8 +74,9 @@ vm_tcplistener_read_timeouts_total{name="https", addr=":443"} 12353
vm_tcplistener_write_calls_total{name="http", addr=":80"} 3996
vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356
`
- readDataFunc := func(dst []byte) ([]byte, error) {
- return append(dst, data...), nil
+ readDataFunc := func(dst *bytesutil.ByteBuffer) error {
+ dst.B = append(dst.B, data...)
+ return nil
}
b.ReportAllocs()
b.SetBytes(int64(len(data)))
diff --git a/lib/promscrape/statconn.go b/lib/promscrape/statconn.go
index 7aaf28580..6167646da 100644
--- a/lib/promscrape/statconn.go
+++ b/lib/promscrape/statconn.go
@@ -11,9 +11,6 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
- "github.com/VictoriaMetrics/fasthttp"
"github.com/VictoriaMetrics/metrics"
)
@@ -52,30 +49,6 @@ var (
stdDialerOnce sync.Once
)
-func newStatDialFunc(proxyURL *proxy.URL, ac *promauth.Config) (fasthttp.DialFunc, error) {
- dialFunc, err := proxyURL.NewDialFunc(ac)
- if err != nil {
- return nil, err
- }
- statDialFunc := func(addr string) (net.Conn, error) {
- conn, err := dialFunc(addr)
- dialsTotal.Inc()
- if err != nil {
- dialErrors.Inc()
- if !netutil.TCP6Enabled() && !isTCPv4Addr(addr) {
- err = fmt.Errorf("%w; try -enableTCP6 command-line flag if you scrape ipv6 addresses", err)
- }
- return nil, err
- }
- conns.Inc()
- sc := &statConn{
- Conn: conn,
- }
- return sc, nil
- }
- return statDialFunc, nil
-}
-
var (
dialsTotal = metrics.NewCounter(`vm_promscrape_dials_total`)
dialErrors = metrics.NewCounter(`vm_promscrape_dial_errors_total`)
diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go
index fb302ff12..9bf9103c7 100644
--- a/lib/proxy/proxy.go
+++ b/lib/proxy/proxy.go
@@ -1,21 +1,13 @@
package proxy
import (
- "bufio"
- "crypto/tls"
"encoding/base64"
"fmt"
- "net"
"net/http"
"net/url"
- "strings"
- "time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
- "github.com/VictoriaMetrics/fasthttp"
- "golang.org/x/net/proxy"
)
var validURLSchemes = []string{"http", "https", "socks5", "tls+socks5"}
@@ -84,18 +76,6 @@ func (u *URL) SetHeaders(ac *promauth.Config, req *http.Request) error {
return ac.SetHeaders(req, false)
}
-// SetFasthttpHeaders sets headers to req according to u and ac configs.
-func (u *URL) SetFasthttpHeaders(ac *promauth.Config, req *fasthttp.Request) error {
- ah, err := u.getAuthHeader(ac)
- if err != nil {
- return fmt.Errorf("cannot obtain Proxy-Authorization headers: %w", err)
- }
- if ah != "" {
- req.Header.Set("Proxy-Authorization", ah)
- }
- return ac.SetFasthttpHeaders(req, false)
-}
-
// getAuthHeader returns Proxy-Authorization auth header for the given u and ac.
func (u *URL) getAuthHeader(ac *promauth.Config) (string, error) {
authHeader := ""
@@ -141,136 +121,3 @@ func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error {
u.URL = parsedURL
return nil
}
-
-// NewDialFunc returns dial func for the given u and ac.
-func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
- if u == nil || u.URL == nil {
- return defaultDialFunc, nil
- }
- pu := u.URL
- if !isURLSchemeValid(pu.Scheme) {
- return nil, fmt.Errorf("unknown scheme=%q for proxy_url=%q, must be in %s", pu.Scheme, pu.Redacted(), validURLSchemes)
- }
- isTLS := (pu.Scheme == "https" || pu.Scheme == "tls+socks5")
- proxyAddr := addMissingPort(pu.Host, isTLS)
- var tlsCfg *tls.Config
- if isTLS {
- var err error
- tlsCfg, err = ac.NewTLSConfig()
- if err != nil {
- return nil, fmt.Errorf("cannot initialize tls config: %w", err)
- }
- if !tlsCfg.InsecureSkipVerify && tlsCfg.ServerName == "" {
- tlsCfg.ServerName = tlsServerName(proxyAddr)
- }
- }
- if pu.Scheme == "socks5" || pu.Scheme == "tls+socks5" {
- return socks5DialFunc(proxyAddr, pu, tlsCfg)
- }
- dialFunc := func(addr string) (net.Conn, error) {
- proxyConn, err := defaultDialFunc(proxyAddr)
- if err != nil {
- return nil, fmt.Errorf("cannot connect to proxy %q: %w", pu.Redacted(), err)
- }
- if isTLS {
- proxyConn = tls.Client(proxyConn, tlsCfg)
- }
- authHeader, err := u.getAuthHeader(ac)
- if err != nil {
- return nil, fmt.Errorf("cannot obtain Proxy-Authorization header: %w", err)
- }
- if authHeader != "" {
- authHeader = "Proxy-Authorization: " + authHeader + "\r\n"
- authHeader += ac.HeadersNoAuthString()
- }
- conn, err := sendConnectRequest(proxyConn, proxyAddr, addr, authHeader)
- if err != nil {
- _ = proxyConn.Close()
- return nil, fmt.Errorf("error when sending CONNECT request to proxy %q: %w", pu.Redacted(), err)
- }
- return conn, nil
- }
- return dialFunc, nil
-}
-
-func socks5DialFunc(proxyAddr string, pu *url.URL, tlsCfg *tls.Config) (fasthttp.DialFunc, error) {
- var sac *proxy.Auth
- if pu.User != nil {
- username := pu.User.Username()
- password, _ := pu.User.Password()
- sac = &proxy.Auth{
- User: username,
- Password: password,
- }
- }
- network := netutil.GetTCPNetwork()
- var dialer proxy.Dialer = proxy.Direct
- if tlsCfg != nil {
- dialer = &tls.Dialer{
- Config: tlsCfg,
- }
- }
- d, err := proxy.SOCKS5(network, proxyAddr, sac, dialer)
- if err != nil {
- return nil, fmt.Errorf("cannot create socks5 proxy for url: %s, err: %w", pu.Redacted(), err)
- }
- dialFunc := func(addr string) (net.Conn, error) {
- return d.Dial(network, addr)
- }
- return dialFunc, nil
-}
-
-func addMissingPort(addr string, isTLS bool) string {
- if strings.IndexByte(addr, ':') >= 0 {
- return addr
- }
- port := "80"
- if isTLS {
- port = "443"
- }
- return addr + ":" + port
-}
-
-func tlsServerName(addr string) string {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return addr
- }
- return host
-}
-
-func defaultDialFunc(addr string) (net.Conn, error) {
- network := netutil.GetTCPNetwork()
- // Do not use fasthttp.Dial because of https://github.com/VictoriaMetrics/VictoriaMetrics/issues/987
- return net.DialTimeout(network, addr, 5*time.Second)
-}
-
-// sendConnectRequest sends CONNECT request to proxyConn for the given addr and authHeader and returns the established connection to dstAddr.
-func sendConnectRequest(proxyConn net.Conn, proxyAddr, dstAddr, authHeader string) (net.Conn, error) {
- req := "CONNECT " + dstAddr + " HTTP/1.1\r\nHost: " + proxyAddr + "\r\n" + authHeader + "\r\n"
- if _, err := proxyConn.Write([]byte(req)); err != nil {
- return nil, fmt.Errorf("cannot send CONNECT request for dstAddr=%q: %w", dstAddr, err)
- }
- var res fasthttp.Response
- res.SkipBody = true
- conn := &bufferedReaderConn{
- br: bufio.NewReader(proxyConn),
- Conn: proxyConn,
- }
- if err := res.Read(conn.br); err != nil {
- return nil, fmt.Errorf("cannot read CONNECT response for dstAddr=%q: %w", dstAddr, err)
- }
- if statusCode := res.Header.StatusCode(); statusCode != 200 {
- return nil, fmt.Errorf("unexpected status code received: %d; want: 200; response body: %q", statusCode, res.Body())
- }
- return conn, nil
-}
-
-type bufferedReaderConn struct {
- net.Conn
- br *bufio.Reader
-}
-
-func (brc *bufferedReaderConn) Read(p []byte) (int, error) {
- return brc.br.Read(p)
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/.gitignore b/vendor/github.com/VictoriaMetrics/fasthttp/.gitignore
deleted file mode 100644
index 7b58ce45b..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-tags
-*.pprof
-*.fasthttp.gz
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/.travis.yml b/vendor/github.com/VictoriaMetrics/fasthttp/.travis.yml
deleted file mode 100644
index 3ed568b14..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-language: go
-
-go:
- - 1.9.x
- - 1.8.x
-
-script:
- # build test for supported platforms
- - GOOS=linux go build
- - GOOS=darwin go build
- - GOOS=freebsd go build
- - GOOS=windows go build
- - GOARCH=386 go build
-
- # run tests on a standard platform
- - go test -v ./...
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/LICENSE b/vendor/github.com/VictoriaMetrics/fasthttp/LICENSE
deleted file mode 100644
index 22bf00cb4..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/README.md b/vendor/github.com/VictoriaMetrics/fasthttp/README.md
deleted file mode 100644
index 2fe7f4074..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Private copy of [fasthttp](https://github.com/valyala/fasthttp) for VictoriaMetrics usage.
-
-It contains only the functionality required for [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
-
-Do not use it in your own projects!
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/TODO b/vendor/github.com/VictoriaMetrics/fasthttp/TODO
deleted file mode 100644
index ce7505f1c..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-- SessionClient with referer and cookies support.
-- ProxyHandler similar to FSHandler.
-- WebSockets. See https://tools.ietf.org/html/rfc6455 .
-- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 .
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/args.go b/vendor/github.com/VictoriaMetrics/fasthttp/args.go
deleted file mode 100644
index 5d432f5f9..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/args.go
+++ /dev/null
@@ -1,517 +0,0 @@
-package fasthttp
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
-)
-
-// AcquireArgs returns an empty Args object from the pool.
-//
-// The returned Args may be returned to the pool with ReleaseArgs
-// when no longer needed. This allows reducing GC load.
-func AcquireArgs() *Args {
- return argsPool.Get().(*Args)
-}
-
-// ReleaseArgs returns the object acquired via AquireArgs to the pool.
-//
-// Do not access the released Args object, otherwise data races may occur.
-func ReleaseArgs(a *Args) {
- a.Reset()
- argsPool.Put(a)
-}
-
-var argsPool = &sync.Pool{
- New: func() interface{} {
- return &Args{}
- },
-}
-
-// Args represents query arguments.
-//
-// It is forbidden copying Args instances. Create new instances instead
-// and use CopyTo().
-//
-// Args instance MUST NOT be used from concurrently running goroutines.
-type Args struct {
- noCopy noCopy
-
- args []argsKV
- buf []byte
-}
-
-type argsKV struct {
- key []byte
- value []byte
-}
-
-// Reset clears query args.
-func (a *Args) Reset() {
- a.args = a.args[:0]
-}
-
-// CopyTo copies all args to dst.
-func (a *Args) CopyTo(dst *Args) {
- dst.Reset()
- dst.args = copyArgs(dst.args, a.args)
-}
-
-// VisitAll calls f for each existing arg.
-//
-// f must not retain references to key and value after returning.
-// Make key and/or value copies if you need storing them after returning.
-func (a *Args) VisitAll(f func(key, value []byte)) {
- visitArgs(a.args, f)
-}
-
-// Len returns the number of query args.
-func (a *Args) Len() int {
- return len(a.args)
-}
-
-// Parse parses the given string containing query args.
-func (a *Args) Parse(s string) {
- a.buf = append(a.buf[:0], s...)
- a.ParseBytes(a.buf)
-}
-
-// ParseBytes parses the given b containing query args.
-func (a *Args) ParseBytes(b []byte) {
- a.Reset()
-
- var s argsScanner
- s.b = b
-
- var kv *argsKV
- a.args, kv = allocArg(a.args)
- for s.next(kv) {
- if len(kv.key) > 0 || len(kv.value) > 0 {
- a.args, kv = allocArg(a.args)
- }
- }
- a.args = releaseArg(a.args)
-}
-
-// String returns string representation of query args.
-func (a *Args) String() string {
- return string(a.QueryString())
-}
-
-// QueryString returns query string for the args.
-//
-// The returned value is valid until the next call to Args methods.
-func (a *Args) QueryString() []byte {
- a.buf = a.AppendBytes(a.buf[:0])
- return a.buf
-}
-
-// AppendBytes appends query string to dst and returns the extended dst.
-func (a *Args) AppendBytes(dst []byte) []byte {
- for i, n := 0, len(a.args); i < n; i++ {
- kv := &a.args[i]
- dst = AppendQuotedArg(dst, kv.key)
- if len(kv.value) > 0 {
- dst = append(dst, '=')
- dst = AppendQuotedArg(dst, kv.value)
- }
- if i+1 < n {
- dst = append(dst, '&')
- }
- }
- return dst
-}
-
-// WriteTo writes query string to w.
-//
-// WriteTo implements io.WriterTo interface.
-func (a *Args) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(a.QueryString())
- return int64(n), err
-}
-
-// Del deletes argument with the given key from query args.
-func (a *Args) Del(key string) {
- a.args = delAllArgs(a.args, key)
-}
-
-// DelBytes deletes argument with the given key from query args.
-func (a *Args) DelBytes(key []byte) {
- a.args = delAllArgs(a.args, b2s(key))
-}
-
-// Add adds 'key=value' argument.
-//
-// Multiple values for the same key may be added.
-func (a *Args) Add(key, value string) {
- a.args = appendArg(a.args, key, value)
-}
-
-// AddBytesK adds 'key=value' argument.
-//
-// Multiple values for the same key may be added.
-func (a *Args) AddBytesK(key []byte, value string) {
- a.args = appendArg(a.args, b2s(key), value)
-}
-
-// AddBytesV adds 'key=value' argument.
-//
-// Multiple values for the same key may be added.
-func (a *Args) AddBytesV(key string, value []byte) {
- a.args = appendArg(a.args, key, b2s(value))
-}
-
-// AddBytesKV adds 'key=value' argument.
-//
-// Multiple values for the same key may be added.
-func (a *Args) AddBytesKV(key, value []byte) {
- a.args = appendArg(a.args, b2s(key), b2s(value))
-}
-
-// Set sets 'key=value' argument.
-func (a *Args) Set(key, value string) {
- a.args = setArg(a.args, key, value)
-}
-
-// SetBytesK sets 'key=value' argument.
-func (a *Args) SetBytesK(key []byte, value string) {
- a.args = setArg(a.args, b2s(key), value)
-}
-
-// SetBytesV sets 'key=value' argument.
-func (a *Args) SetBytesV(key string, value []byte) {
- a.args = setArg(a.args, key, b2s(value))
-}
-
-// SetBytesKV sets 'key=value' argument.
-func (a *Args) SetBytesKV(key, value []byte) {
- a.args = setArgBytes(a.args, key, value)
-}
-
-// Peek returns query arg value for the given key.
-//
-// Returned value is valid until the next Args call.
-func (a *Args) Peek(key string) []byte {
- return peekArgStr(a.args, key)
-}
-
-// PeekBytes returns query arg value for the given key.
-//
-// Returned value is valid until the next Args call.
-func (a *Args) PeekBytes(key []byte) []byte {
- return peekArgBytes(a.args, key)
-}
-
-// PeekMulti returns all the arg values for the given key.
-func (a *Args) PeekMulti(key string) [][]byte {
- var values [][]byte
- a.VisitAll(func(k, v []byte) {
- if string(k) == key {
- values = append(values, v)
- }
- })
- return values
-}
-
-// PeekMultiBytes returns all the arg values for the given key.
-func (a *Args) PeekMultiBytes(key []byte) [][]byte {
- return a.PeekMulti(b2s(key))
-}
-
-// Has returns true if the given key exists in Args.
-func (a *Args) Has(key string) bool {
- return hasArg(a.args, key)
-}
-
-// HasBytes returns true if the given key exists in Args.
-func (a *Args) HasBytes(key []byte) bool {
- return hasArg(a.args, b2s(key))
-}
-
-// ErrNoArgValue is returned when Args value with the given key is missing.
-var ErrNoArgValue = errors.New("no Args value for the given key")
-
-// GetUint returns uint value for the given key.
-func (a *Args) GetUint(key string) (int, error) {
- value := a.Peek(key)
- if len(value) == 0 {
- return -1, ErrNoArgValue
- }
- return ParseUint(value)
-}
-
-// SetUint sets uint value for the given key.
-func (a *Args) SetUint(key string, value int) {
- bb := AcquireByteBuffer()
- bb.B = AppendUint(bb.B[:0], value)
- a.SetBytesV(key, bb.B)
- ReleaseByteBuffer(bb)
-}
-
-// SetUintBytes sets uint value for the given key.
-func (a *Args) SetUintBytes(key []byte, value int) {
- a.SetUint(b2s(key), value)
-}
-
-// GetUintOrZero returns uint value for the given key.
-//
-// Zero (0) is returned on error.
-func (a *Args) GetUintOrZero(key string) int {
- n, err := a.GetUint(key)
- if err != nil {
- n = 0
- }
- return n
-}
-
-// GetUfloat returns ufloat value for the given key.
-func (a *Args) GetUfloat(key string) (float64, error) {
- value := a.Peek(key)
- if len(value) == 0 {
- return -1, ErrNoArgValue
- }
- return ParseUfloat(value)
-}
-
-// GetUfloatOrZero returns ufloat value for the given key.
-//
-// Zero (0) is returned on error.
-func (a *Args) GetUfloatOrZero(key string) float64 {
- f, err := a.GetUfloat(key)
- if err != nil {
- f = 0
- }
- return f
-}
-
-// GetBool returns boolean value for the given key.
-//
-// true is returned for '1', 'y' and 'yes' values,
-// otherwise false is returned.
-func (a *Args) GetBool(key string) bool {
- switch string(a.Peek(key)) {
- case "1", "y", "yes":
- return true
- default:
- return false
- }
-}
-
-func visitArgs(args []argsKV, f func(k, v []byte)) {
- for i, n := 0, len(args); i < n; i++ {
- kv := &args[i]
- f(kv.key, kv.value)
- }
-}
-
-func copyArgs(dst, src []argsKV) []argsKV {
- if cap(dst) < len(src) {
- tmp := make([]argsKV, len(src))
- copy(tmp, dst)
- dst = tmp
- }
- n := len(src)
- dst = dst[:n]
- for i := 0; i < n; i++ {
- dstKV := &dst[i]
- srcKV := &src[i]
- dstKV.key = append(dstKV.key[:0], srcKV.key...)
- dstKV.value = append(dstKV.value[:0], srcKV.value...)
- }
- return dst
-}
-
-func delAllArgsBytes(args []argsKV, key []byte) []argsKV {
- return delAllArgs(args, b2s(key))
-}
-
-func delAllArgs(args []argsKV, key string) []argsKV {
- for i, n := 0, len(args); i < n; i++ {
- kv := &args[i]
- if key == string(kv.key) {
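- // Shift the remaining args left and park the removed kv at the
- // tail, so its key/value buffers can be reused by allocArg later.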
- tmp := *kv
- copy(args[i:], args[i+1:])
- n--
- args[n] = tmp
- args = args[:n]
- }
- }
- return args
-}
-
-func setArgBytes(h []argsKV, key, value []byte) []argsKV {
- return setArg(h, b2s(key), b2s(value))
-}
-
-func setArg(h []argsKV, key, value string) []argsKV {
- n := len(h)
- for i := 0; i < n; i++ {
- kv := &h[i]
- if key == string(kv.key) {
- kv.value = append(kv.value[:0], value...)
- return h
- }
- }
- return appendArg(h, key, value)
-}
-
-func appendArgBytes(h []argsKV, key, value []byte) []argsKV {
- return appendArg(h, b2s(key), b2s(value))
-}
-
-func appendArg(args []argsKV, key, value string) []argsKV {
- var kv *argsKV
- args, kv = allocArg(args)
- kv.key = append(kv.key[:0], key...)
- kv.value = append(kv.value[:0], value...)
- return args
-}
-
-func allocArg(h []argsKV) ([]argsKV, *argsKV) {
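- // Extend into spare capacity when possible; allocate only when the
- // slice is full. The returned pointer refers to the new last item.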
- n := len(h)
- if cap(h) > n {
- h = h[:n+1]
- } else {
- h = append(h, argsKV{})
- }
- return h, &h[n]
-}
-
-func releaseArg(h []argsKV) []argsKV {
- return h[:len(h)-1]
-}
-
-func hasArg(h []argsKV, key string) bool {
- for i, n := 0, len(h); i < n; i++ {
- kv := &h[i]
- if key == string(kv.key) {
- return true
- }
- }
- return false
-}
-
-func peekArgBytes(h []argsKV, k []byte) []byte {
- for i, n := 0, len(h); i < n; i++ {
- kv := &h[i]
- if bytes.Equal(kv.key, k) {
- return kv.value
- }
- }
- return nil
-}
-
-func peekArgStr(h []argsKV, k string) []byte {
- for i, n := 0, len(h); i < n; i++ {
- kv := &h[i]
- if string(kv.key) == k {
- return kv.value
- }
- }
- return nil
-}
-
-type argsScanner struct {
- b []byte
-}
-
-func (s *argsScanner) next(kv *argsKV) bool {
- if len(s.b) == 0 {
- return false
- }
-
- isKey := true
- k := 0
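- // k marks the start of the current value, i.e. the byte after '='.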
- for i, c := range s.b {
- switch c {
- case '=':
- if isKey {
- isKey = false
- kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
- k = i + 1
- }
- case '&':
- if isKey {
- kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
- kv.value = kv.value[:0]
- } else {
- kv.value = decodeArgAppend(kv.value[:0], s.b[k:i])
- }
- s.b = s.b[i+1:]
- return true
- }
- }
-
- if isKey {
- kv.key = decodeArgAppend(kv.key[:0], s.b)
- kv.value = kv.value[:0]
- } else {
- kv.value = decodeArgAppend(kv.value[:0], s.b[k:])
- }
- s.b = s.b[len(s.b):]
- return true
-}
-
-func decodeArgAppend(dst, src []byte) []byte {
- if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 {
- // fast path: src doesn't contain encoded chars
- return append(dst, src...)
- }
-
- // slow path
- for i := 0; i < len(src); i++ {
- c := src[i]
- if c == '%' {
- if i+2 >= len(src) {
- return append(dst, src[i:]...)
- }
- x2 := hex2intTable[src[i+2]]
- x1 := hex2intTable[src[i+1]]
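- // 16 marks a non-hex char, i.e. a malformed escape: emit the '%'
- // verbatim and leave the following bytes untouched.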
- if x1 == 16 || x2 == 16 {
- dst = append(dst, '%')
- } else {
- dst = append(dst, x1<<4|x2)
- i += 2
- }
- } else if c == '+' {
- dst = append(dst, ' ')
- } else {
- dst = append(dst, c)
- }
- }
- return dst
-}
-
-// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
-// substitute '+' with ' '.
-//
-// The function is copy-pasted from decodeArgAppend for performance
-// reasons only.
-func decodeArgAppendNoPlus(dst, src []byte) []byte {
- if bytes.IndexByte(src, '%') < 0 {
- // fast path: src doesn't contain encoded chars
- return append(dst, src...)
- }
-
- // slow path
- for i := 0; i < len(src); i++ {
- c := src[i]
- if c == '%' {
- if i+2 >= len(src) {
- return append(dst, src[i:]...)
- }
- x2 := hex2intTable[src[i+2]]
- x1 := hex2intTable[src[i+1]]
- if x1 == 16 || x2 == 16 {
- dst = append(dst, '%')
- } else {
- dst = append(dst, x1<<4|x2)
- i += 2
- }
- } else {
- dst = append(dst, c)
- }
- }
- return dst
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/bytebuffer.go b/vendor/github.com/VictoriaMetrics/fasthttp/bytebuffer.go
deleted file mode 100644
index f9651722d..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/bytebuffer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package fasthttp
-
-import (
- "github.com/valyala/bytebufferpool"
-)
-
-// ByteBuffer provides byte buffer, which can be used with fasthttp API
-// in order to minimize memory allocations.
-//
-// ByteBuffer may be used with functions appending data to the given []byte
-// slice. See example code for details.
-//
-// Use AcquireByteBuffer for obtaining an empty byte buffer.
-//
-// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead.
-type ByteBuffer bytebufferpool.ByteBuffer
-
-// Write implements io.Writer - it appends p to ByteBuffer.B
-func (b *ByteBuffer) Write(p []byte) (int, error) {
- return bb(b).Write(p)
-}
-
-// WriteString appends s to ByteBuffer.B
-func (b *ByteBuffer) WriteString(s string) (int, error) {
- return bb(b).WriteString(s)
-}
-
-// Set sets ByteBuffer.B to p
-func (b *ByteBuffer) Set(p []byte) {
- bb(b).Set(p)
-}
-
-// SetString sets ByteBuffer.B to s
-func (b *ByteBuffer) SetString(s string) {
- bb(b).SetString(s)
-}
-
-// Reset makes ByteBuffer.B empty.
-func (b *ByteBuffer) Reset() {
- bb(b).Reset()
-}
-
-// AcquireByteBuffer returns an empty byte buffer from the pool.
-//
-// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call.
-// This reduces the number of memory allocations required for byte buffer
-// management.
-func AcquireByteBuffer() *ByteBuffer {
- return (*ByteBuffer)(defaultByteBufferPool.Get())
-}
-
-// ReleaseByteBuffer returns byte buffer to the pool.
-//
-// ByteBuffer.B mustn't be touched after returning it to the pool.
-// Otherwise data races occur.
-func ReleaseByteBuffer(b *ByteBuffer) {
- defaultByteBufferPool.Put(bb(b))
-}
-
-func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer {
- return (*bytebufferpool.ByteBuffer)(b)
-}
-
-var defaultByteBufferPool bytebufferpool.Pool
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv.go b/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv.go
deleted file mode 100644
index 65387407a..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
- "net"
- "reflect"
- "strings"
- "sync"
- "time"
- "unsafe"
-)
-
-// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
-func AppendHTMLEscape(dst []byte, s string) []byte {
- if strings.IndexByte(s, '<') < 0 &&
- strings.IndexByte(s, '>') < 0 &&
- strings.IndexByte(s, '"') < 0 &&
- strings.IndexByte(s, '\'') < 0 {
-
- // fast path - nothing to escape
- return append(dst, s...)
- }
-
- // slow path
- var prev int
- var sub string
- for i, n := 0, len(s); i < n; i++ {
- sub = ""
- switch s[i] {
- case '<':
- sub = "&lt;"
- case '>':
- sub = "&gt;"
- case '"':
- sub = "&quot;"
- case '\'':
- sub = "&#39;"
- }
- if len(sub) > 0 {
- dst = append(dst, s[prev:i]...)
- dst = append(dst, sub...)
- prev = i + 1
- }
- }
- return append(dst, s[prev:]...)
-}
-
-// AppendHTMLEscapeBytes appends html-escaped s to dst and returns
-// the extended dst.
-func AppendHTMLEscapeBytes(dst, s []byte) []byte {
- return AppendHTMLEscape(dst, b2s(s))
-}
-
-// AppendIPv4 appends string representation of the given ip v4 to dst
-// and returns the extended dst.
-func AppendIPv4(dst []byte, ip net.IP) []byte {
- ip = ip.To4()
- if ip == nil {
- return append(dst, "non-v4 ip passed to AppendIPv4"...)
- }
-
- dst = AppendUint(dst, int(ip[0]))
- for i := 1; i < 4; i++ {
- dst = append(dst, '.')
- dst = AppendUint(dst, int(ip[i]))
- }
- return dst
-}
-
-var errEmptyIPStr = errors.New("empty ip address string")
-
-// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst.
-func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) {
- if len(ipStr) == 0 {
- return dst, errEmptyIPStr
- }
- if len(dst) < net.IPv4len {
- dst = make([]byte, net.IPv4len)
- }
- copy(dst, net.IPv4zero)
- dst = dst.To4()
- if dst == nil {
- panic("BUG: dst must not be nil")
- }
-
- b := ipStr
- for i := 0; i < 3; i++ {
- n := bytes.IndexByte(b, '.')
- if n < 0 {
- return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr)
- }
- v, err := ParseUint(b[:n])
- if err != nil {
- return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
- }
- if v > 255 {
- return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
- }
- dst[i] = byte(v)
- b = b[n+1:]
- }
- v, err := ParseUint(b)
- if err != nil {
- return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
- }
- if v > 255 {
- return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
- }
- dst[3] = byte(v)
-
- return dst, nil
-}
-
-// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date
-// to dst and returns the extended dst.
-func AppendHTTPDate(dst []byte, date time.Time) []byte {
- dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123)
- copy(dst[len(dst)-3:], strGMT)
- return dst
-}
-
-// ParseHTTPDate parses HTTP-compliant (RFC1123) date.
-func ParseHTTPDate(date []byte) (time.Time, error) {
- return time.Parse(time.RFC1123, b2s(date))
-}
-
-// AppendUint appends n to dst and returns the extended dst.
-func AppendUint(dst []byte, n int) []byte {
- if n < 0 {
- panic("BUG: int must be positive")
- }
-
- var b [20]byte
- buf := b[:]
- i := len(buf)
- var q int
- for n >= 10 {
- i--
- q = n / 10
- buf[i] = '0' + byte(n-q*10)
- n = q
- }
- i--
- buf[i] = '0' + byte(n)
-
- dst = append(dst, buf[i:]...)
- return dst
-}
-
-// ParseUint parses uint from buf.
-func ParseUint(buf []byte) (int, error) {
- v, n, err := parseUintBuf(buf)
- if n != len(buf) {
- return -1, errUnexpectedTrailingChar
- }
- return v, err
-}
-
-var (
- errEmptyInt = errors.New("empty integer")
- errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9")
- errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9")
- errTooLongInt = errors.New("too long int")
-)
-
-func parseUintBuf(b []byte) (int, int, error) {
- n := len(b)
- if n == 0 {
- return -1, 0, errEmptyInt
- }
- v := 0
- for i := 0; i < n; i++ {
- c := b[i]
- k := c - '0'
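- // Byte subtraction wraps around, so any non-digit char yields k > 9.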
- if k > 9 {
- if i == 0 {
- return -1, i, errUnexpectedFirstChar
- }
- return v, i, nil
- }
- if i >= maxIntChars {
- return -1, i, errTooLongInt
- }
- v = 10*v + int(k)
- }
- return v, n, nil
-}
-
-var (
- errEmptyFloat = errors.New("empty float number")
- errDuplicateFloatPoint = errors.New("duplicate point found in float number")
- errUnexpectedFloatEnd = errors.New("unexpected end of float number")
- errInvalidFloatExponent = errors.New("invalid float number exponent")
- errUnexpectedFloatChar = errors.New("unexpected char found in float number")
-)
-
-// ParseUfloat parses unsigned float from buf.
-func ParseUfloat(buf []byte) (float64, error) {
- if len(buf) == 0 {
- return -1, errEmptyFloat
- }
- b := buf
- var v uint64
- var offset = 1.0
- var pointFound bool
- for i, c := range b {
- if c < '0' || c > '9' {
- if c == '.' {
- if pointFound {
- return -1, errDuplicateFloatPoint
- }
- pointFound = true
- continue
- }
- if c == 'e' || c == 'E' {
- if i+1 >= len(b) {
- return -1, errUnexpectedFloatEnd
- }
- b = b[i+1:]
- minus := -1
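- // minus holds the exponent sign; it stays -1 only when an explicit
- // '-' is found below.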
- switch b[0] {
- case '+':
- b = b[1:]
- minus = 1
- case '-':
- b = b[1:]
- default:
- minus = 1
- }
- vv, err := ParseUint(b)
- if err != nil {
- return -1, errInvalidFloatExponent
- }
- return float64(v) * offset * math.Pow10(minus*int(vv)), nil
- }
- return -1, errUnexpectedFloatChar
- }
- v = 10*v + uint64(c-'0')
- if pointFound {
- offset /= 10
- }
- }
- return float64(v) * offset, nil
-}
-
-var (
- errEmptyHexNum = errors.New("empty hex number")
- errTooLargeHexNum = errors.New("too large hex number")
-)
-
-func readHexInt(r *bufio.Reader) (int, error) {
- n := 0
- i := 0
- var k int
- for {
- c, err := r.ReadByte()
- if err != nil {
- if err == io.EOF && i > 0 {
- return n, nil
- }
- return -1, err
- }
- k = int(hex2intTable[c])
- if k == 16 {
- if i == 0 {
- return -1, errEmptyHexNum
- }
- r.UnreadByte()
- return n, nil
- }
- if i >= maxHexIntChars {
- return -1, errTooLargeHexNum
- }
- n = (n << 4) | k
- i++
- }
-}
-
-var hexIntBufPool sync.Pool
-
-func writeHexInt(w *bufio.Writer, n int) error {
- if n < 0 {
- panic("BUG: int must be positive")
- }
-
- v := hexIntBufPool.Get()
- if v == nil {
- v = make([]byte, maxHexIntChars+1)
- }
- buf := v.([]byte)
- i := len(buf) - 1
- for {
- buf[i] = int2hexbyte(n & 0xf)
- n >>= 4
- if n == 0 {
- break
- }
- i--
- }
- _, err := w.Write(buf[i:])
- hexIntBufPool.Put(v)
- return err
-}
-
-func int2hexbyte(n int) byte {
- if n < 10 {
- return '0' + byte(n)
- }
- return 'a' + byte(n) - 10
-}
-
-func hexCharUpper(c byte) byte {
- if c < 10 {
- return '0' + c
- }
- return c - 10 + 'A'
-}
-
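-// hex2intTable maps hex chars to their numeric values; every other
-// byte maps to 16, which callers treat as an invalid-char sentinel.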
-var hex2intTable = func() []byte {
- b := make([]byte, 256)
- for i := 0; i < 256; i++ {
- c := byte(16)
- if i >= '0' && i <= '9' {
- c = byte(i) - '0'
- } else if i >= 'a' && i <= 'f' {
- c = byte(i) - 'a' + 10
- } else if i >= 'A' && i <= 'F' {
- c = byte(i) - 'A' + 10
- }
- b[i] = c
- }
- return b
-}()
-
-const toLower = 'a' - 'A'
-
-var toLowerTable = func() [256]byte {
- var a [256]byte
- for i := 0; i < 256; i++ {
- c := byte(i)
- if c >= 'A' && c <= 'Z' {
- c += toLower
- }
- a[i] = c
- }
- return a
-}()
-
-var toUpperTable = func() [256]byte {
- var a [256]byte
- for i := 0; i < 256; i++ {
- c := byte(i)
- if c >= 'a' && c <= 'z' {
- c -= toLower
- }
- a[i] = c
- }
- return a
-}()
-
-func lowercaseBytes(b []byte) {
- for i := 0; i < len(b); i++ {
- p := &b[i]
- *p = toLowerTable[*p]
- }
-}
-
-// b2s converts byte slice to a string without memory allocation.
-// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ .
-//
-// Note that it may break if the string and/or slice header changes
-// in future Go versions.
-func b2s(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// s2b converts string to a byte slice without memory allocation.
-//
-// Note that it may break if the string and/or slice header changes
-// in future Go versions.
-func s2b(s string) (b []byte) {
- sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = sh.Data
- bh.Len = sh.Len
- bh.Cap = sh.Len
- return b
-}
-
-// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
-//
-// dst may point to src. In this case src will be overwritten.
-func AppendUnquotedArg(dst, src []byte) []byte {
- return decodeArgAppend(dst, src)
-}
-
-// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
-func AppendQuotedArg(dst, src []byte) []byte {
- for _, c := range src {
- // See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm
- if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
- c == '*' || c == '-' || c == '.' || c == '_' {
- dst = append(dst, c)
- } else {
- dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
- }
- }
- return dst
-}
-
-func appendQuotedPath(dst, src []byte) []byte {
- for _, c := range src {
- if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
- c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' {
- dst = append(dst, c)
- } else {
- dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
- }
- }
- return dst
-}
-
-// EqualBytesStr returns true if string(b) == s.
-//
-// This function has no performance benefits comparing to string(b) == s.
-// It is left here for backwards compatibility only.
-//
-// This function is deprecated and may be deleted soon.
-func EqualBytesStr(b []byte, s string) bool {
- return string(b) == s
-}
-
-// AppendBytesStr appends src to dst and returns the extended dst.
-//
-// This function has no performance benefits comparing to append(dst, src...).
-// It is left here for backwards compatibility only.
-//
-// This function is deprecated and may be deleted soon.
-func AppendBytesStr(dst []byte, src string) []byte {
- return append(dst, src...)
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_32.go b/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_32.go
deleted file mode 100644
index 6b527f9c8..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_32.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build !amd64 && !arm64 && !ppc64
-// +build !amd64,!arm64,!ppc64
-
-package fasthttp
-
-const (
- maxIntChars = 9
- maxHexIntChars = 7
-)
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_64.go b/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_64.go
deleted file mode 100644
index 870549e19..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/bytesconv_64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build amd64 || arm64 || ppc64
-// +build amd64 arm64 ppc64
-
-package fasthttp
-
-const (
- maxIntChars = 18
- maxHexIntChars = 15
-)
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/client.go b/vendor/github.com/VictoriaMetrics/fasthttp/client.go
deleted file mode 100644
index a88e56cc5..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/client.go
+++ /dev/null
@@ -1,2143 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "net"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Do performs the given http request and fills the given http response.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func Do(req *Request, resp *Response) error {
- return defaultClient.Do(req, resp)
-}
-
-// DoTimeout performs the given request and waits for response during
-// the given timeout duration.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned during
-// the given timeout.
-//
-// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
- return defaultClient.DoTimeout(req, resp, timeout)
-}
-
-// DoDeadline performs the given request and waits for response until
-// the given deadline.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned until
-// the given deadline.
-//
-// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- return defaultClient.DoDeadline(req, resp, deadline)
-}
-
-// Get appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-func Get(dst []byte, url string) (statusCode int, body []byte, err error) {
- return defaultClient.Get(dst, url)
-}
-
-// GetTimeout appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// during the given timeout.
-func GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
- return defaultClient.GetTimeout(dst, url, timeout)
-}
-
-// GetDeadline appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// until the given deadline.
-func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
- return defaultClient.GetDeadline(dst, url, deadline)
-}
-
-// Post sends POST request to the given url with the given POST arguments.
-//
-// Response body is appended to dst, which is returned as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// Empty POST body is sent if postArgs is nil.
-func Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
- return defaultClient.Post(dst, url, postArgs)
-}
-
-var defaultClient Client
-
-// Client implements http client.
-//
-// Copying Client by value is prohibited. Create new instance instead.
-//
-// It is safe calling Client methods from concurrently running goroutines.
-type Client struct {
- noCopy noCopy
-
- // Client name. Used in User-Agent request header.
- //
- // Default client name is used if not set.
- Name string
-
- // Callback for establishing new connections to hosts.
- //
- // Default Dial is used if not set.
- Dial DialFunc
-
- // Attempt to connect to both ipv4 and ipv6 addresses if set to true.
- //
- // This option is used only if default TCP dialer is used,
- // i.e. if Dial is blank.
- //
- // By default client connects only to ipv4 addresses,
- // since unfortunately ipv6 remains broken in many networks worldwide :)
- DialDualStack bool
-
- // TLS config for https connections.
- //
- // Default TLS config is used if not set.
- TLSConfig *tls.Config
-
- // Maximum number of connections per each host which may be established.
- //
- // DefaultMaxConnsPerHost is used if not set.
- MaxConnsPerHost int
-
- // Idle keep-alive connections are closed after this duration.
- //
- // By default idle connections are closed
- // after DefaultMaxIdleConnDuration.
- MaxIdleConnDuration time.Duration
-
- // Per-connection buffer size for responses' reading.
- // This also limits the maximum header size.
- //
- // Default buffer size is used if 0.
- ReadBufferSize int
-
- // Per-connection buffer size for requests' writing.
- //
- // Default buffer size is used if 0.
- WriteBufferSize int
-
- // Maximum duration for full response reading (including body).
- //
- // By default response read timeout is unlimited.
- ReadTimeout time.Duration
-
- // Maximum duration for full request writing (including body).
- //
- // By default request write timeout is unlimited.
- WriteTimeout time.Duration
-
- // Maximum response body size.
- //
- // The client returns ErrBodyTooLarge if this limit is greater than 0
- // and response body is greater than the limit.
- //
- // By default response body size is unlimited.
- MaxResponseBodySize int
-
- // The maximum number of idempotent requests the client can make.
- MaxIdempotentRequestAttempts int
-
- mLock sync.Mutex
- m map[string]*HostClient
- ms map[string]*HostClient
-}
-
-// Get appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) {
- return clientGetURL(dst, url, c)
-}
-
-// GetTimeout appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// during the given timeout.
-func (c *Client) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
- return clientGetURLTimeout(dst, url, timeout, c)
-}
-
-// GetDeadline appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// until the given deadline.
-func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
- return clientGetURLDeadline(dst, url, deadline, c)
-}
-
-// Post sends POST request to the given url with the given POST arguments.
-//
-// Response body is appended to dst, which is returned as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// Empty POST body is sent if postArgs is nil.
-func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
- return clientPostURL(dst, url, postArgs, c)
-}
-
-// DoTimeout performs the given request and waits for response during
-// the given timeout duration.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned during
-// the given timeout.
-//
-// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *Client) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
- return clientDoTimeout(req, resp, timeout, c)
-}
-
-// DoDeadline performs the given request and waits for response until
-// the given deadline.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned until
-// the given deadline.
-//
-// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *Client) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- return clientDoDeadline(req, resp, deadline, c)
-}
-
-// Do performs the given http request and fills the given http response.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// Client determines the server to be requested in the following order:
-//
-// - from RequestURI if it contains full url with scheme and host;
-// - from Host header otherwise.
-//
-// Response is ignored if resp is nil.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections
-// to the requested host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *Client) Do(req *Request, resp *Response) error {
- uri := req.URI()
- host := uri.Host()
-
- isTLS := false
- scheme := uri.Scheme()
- if bytes.Equal(scheme, strHTTPS) {
- isTLS = true
- } else if !bytes.Equal(scheme, strHTTP) {
- return fmt.Errorf("unsupported protocol %q. http and https are supported", scheme)
- }
-
- startCleaner := false
-
- c.mLock.Lock()
- m := c.m
- if isTLS {
- m = c.ms
- }
- if m == nil {
- m = make(map[string]*HostClient)
- if isTLS {
- c.ms = m
- } else {
- c.m = m
- }
- }
- hc := m[string(host)]
- if hc == nil {
- hc = &HostClient{
- Addr: addMissingPort(string(host), isTLS),
- Name: c.Name,
- Dial: c.Dial,
- DialDualStack: c.DialDualStack,
- IsTLS: isTLS,
- TLSConfig: c.TLSConfig,
- MaxConns: c.MaxConnsPerHost,
- MaxIdleConnDuration: c.MaxIdleConnDuration,
- ReadBufferSize: c.ReadBufferSize,
- WriteBufferSize: c.WriteBufferSize,
- ReadTimeout: c.ReadTimeout,
- WriteTimeout: c.WriteTimeout,
- MaxResponseBodySize: c.MaxResponseBodySize,
- MaxIdempotentRequestAttempts: c.MaxIdempotentRequestAttempts,
- }
- m[string(host)] = hc
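- // Start the map cleaner goroutine only when the first HostClient
- // is added to the map.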
- if len(m) == 1 {
- startCleaner = true
- }
- }
- c.mLock.Unlock()
-
- if startCleaner {
- go c.mCleaner(m)
- }
-
- return hc.Do(req, resp)
-}
-
-func (c *Client) mCleaner(m map[string]*HostClient) {
- mustStop := false
- for {
- t := time.Now()
- c.mLock.Lock()
- for k, v := range m {
- if t.Sub(v.LastUseTime()) > time.Minute {
- delete(m, k)
- }
- }
- if len(m) == 0 {
- mustStop = true
- }
- c.mLock.Unlock()
-
- if mustStop {
- break
- }
- time.Sleep(10 * time.Second)
- }
-}
-
-// DefaultMaxConnsPerHost is the maximum number of concurrent connections
-// http client may establish per host by default (i.e. if
-// Client.MaxConnsPerHost isn't set).
-const DefaultMaxConnsPerHost = 512
-
-// DefaultMaxIdleConnDuration is the default duration before idle keep-alive
-// connection is closed.
-const DefaultMaxIdleConnDuration = 10 * time.Second
-
-// DialFunc must establish connection to addr.
-//
-// There is no need to establish a TLS (SSL) connection for https.
-// The client automatically converts connection to TLS
-// if HostClient.IsTLS is set.
-//
-// TCP address passed to DialFunc always contains host and port.
-// Example TCP addr values:
-//
-// - foobar.com:80
-// - foobar.com:443
-// - foobar.com:8080
-type DialFunc func(addr string) (net.Conn, error)
-
-// HostClient balances http requests among hosts listed in Addr.
-//
-// HostClient may be used for balancing load among multiple upstream hosts.
-// While multiple addresses passed to HostClient.Addr may be used for balancing
-// load among them, it would be better using LBClient instead, since HostClient
-// may unevenly balance load among upstream hosts.
-//
-// It is forbidden copying HostClient instances. Create new instances instead.
-//
-// It is safe calling HostClient methods from concurrently running goroutines.
-type HostClient struct {
- noCopy noCopy
-
- // Comma-separated list of upstream HTTP server host addresses,
- // which are passed to Dial in a round-robin manner.
- //
- // Each address may contain port if default dialer is used.
- // For example,
- //
- // - foobar.com:80
- // - foobar.com:443
- // - foobar.com:8080
- Addr string
-
- // Client name. Used in User-Agent request header.
- Name string
-
- // Callback for establishing new connection to the host.
- //
- // Default Dial is used if not set.
- Dial DialFunc
-
- // Attempt to connect to both ipv4 and ipv6 host addresses
- // if set to true.
- //
- // This option is used only if default TCP dialer is used,
- // i.e. if Dial is blank.
- //
- // By default client connects only to ipv4 addresses,
- // since unfortunately ipv6 remains broken in many networks worldwide :)
- DialDualStack bool
-
- // Whether to use TLS (aka SSL or HTTPS) for host connections.
- IsTLS bool
-
- // Optional TLS config.
- TLSConfig *tls.Config
-
- // Maximum number of connections which may be established to all hosts
- // listed in Addr.
- //
- // DefaultMaxConnsPerHost is used if not set.
- MaxConns int
-
- // Keep-alive connections are closed after this duration.
- //
- // By default connection duration is unlimited.
- MaxConnDuration time.Duration
-
- // Idle keep-alive connections are closed after this duration.
- //
- // By default idle connections are closed
- // after DefaultMaxIdleConnDuration.
- MaxIdleConnDuration time.Duration
-
- // Per-connection buffer size for responses' reading.
- // This also limits the maximum header size.
- //
- // Default buffer size is used if 0.
- ReadBufferSize int
-
- // Per-connection buffer size for requests' writing.
- //
- // Default buffer size is used if 0.
- WriteBufferSize int
-
- // Maximum duration for full response reading (including body).
- //
- // By default response read timeout is unlimited.
- ReadTimeout time.Duration
-
- // Maximum duration for full request writing (including body).
- //
- // By default request write timeout is unlimited.
- WriteTimeout time.Duration
-
- // Maximum response body size.
- //
- // The client returns ErrBodyTooLarge if this limit is greater than 0
- // and response body is greater than the limit.
- //
- // By default response body size is unlimited.
- MaxResponseBodySize int
-
- // The maximum number of idempotent requests the client can make.
- MaxIdempotentRequestAttempts int
-
- clientName atomic.Value
- lastUseTime uint32
-
- connsLock sync.Mutex
- connsCount int
- conns []*clientConn
-
- addrsLock sync.Mutex
- addrs []string
- addrIdx uint32
-
- tlsConfigMap map[string]*tls.Config
- tlsConfigMapLock sync.Mutex
-
- readerPool sync.Pool
- writerPool sync.Pool
-
- pendingRequests uint64
-
- connsCleanerRun bool
-}
-
-type clientConn struct {
- c net.Conn
-
- createdTime time.Time
- lastUseTime time.Time
-
- lastReadDeadlineTime time.Time
- lastWriteDeadlineTime time.Time
-}
-
-func (cc *clientConn) reset() {
- cc.c = nil
- cc.createdTime = zeroTime
- cc.lastUseTime = zeroTime
- cc.lastReadDeadlineTime = zeroTime
- cc.lastWriteDeadlineTime = zeroTime
-}
-
-var startTimeUnix = time.Now().Unix()
-
-// LastUseTime returns the time when the client was last used.
-func (c *HostClient) LastUseTime() time.Time {
- n := atomic.LoadUint32(&c.lastUseTime)
- return time.Unix(startTimeUnix+int64(n), 0)
-}
-
-// Get appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-func (c *HostClient) Get(dst []byte, url string) (statusCode int, body []byte, err error) {
- return clientGetURL(dst, url, c)
-}
-
-// GetTimeout appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// during the given timeout.
-func (c *HostClient) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
- return clientGetURLTimeout(dst, url, timeout, c)
-}
-
-// GetDeadline appends url contents to dst and returns it as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// ErrTimeout error is returned if url contents couldn't be fetched
-// until the given deadline.
-func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
- return clientGetURLDeadline(dst, url, deadline, c)
-}
-
-// Post sends POST request to the given url with the given POST arguments.
-//
-// Response body is appended to dst, which is returned as body.
-//
-// The function follows redirects. Use Do* for manually handling redirects.
-//
-// New body buffer is allocated if dst is nil.
-//
-// Empty POST body is sent if postArgs is nil.
-func (c *HostClient) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
- return clientPostURL(dst, url, postArgs, c)
-}
-
-type clientDoer interface {
- Do(req *Request, resp *Response) error
-}
-
-func clientGetURL(dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) {
- req := AcquireRequest()
-
- statusCode, body, err = doRequestFollowRedirects(req, dst, url, c)
-
- ReleaseRequest(req)
- return statusCode, body, err
-}
-
-func clientGetURLTimeout(dst []byte, url string, timeout time.Duration, c clientDoer) (statusCode int, body []byte, err error) {
- deadline := time.Now().Add(timeout)
- return clientGetURLDeadline(dst, url, deadline, c)
-}
-
-type clientURLResponse struct {
- statusCode int
- body []byte
- err error
-}
-
-func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDoer) (statusCode int, body []byte, err error) {
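- // -time.Since(deadline) is the time remaining until the deadline.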
- timeout := -time.Since(deadline)
- if timeout <= 0 {
- return 0, dst, ErrTimeout
- }
-
- var ch chan clientURLResponse
- chv := clientURLResponseChPool.Get()
- if chv == nil {
- chv = make(chan clientURLResponse, 1)
- }
- ch = chv.(chan clientURLResponse)
-
- req := AcquireRequest()
-
- // Note that the request continues executing after ErrTimeout until
- // the client-specific ReadTimeout is exceeded. This limits the load
- // on slow hosts to at most MaxConns* concurrent requests.
- //
- // Without this 'hack' the load on a slow host could exceed MaxConns*
- // concurrent requests, since requests that time out on the client side
- // usually keep executing on the host.
- go func() {
- statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirects(req, dst, url, c)
- ch <- clientURLResponse{
- statusCode: statusCodeCopy,
- body: bodyCopy,
- err: errCopy,
- }
- }()
-
- tc := acquireTimer(timeout)
- select {
- case resp := <-ch:
- ReleaseRequest(req)
- clientURLResponseChPool.Put(chv)
- statusCode = resp.statusCode
- body = resp.body
- err = resp.err
- case <-tc.C:
- body = dst
- err = ErrTimeout
- }
- releaseTimer(tc)
-
- return statusCode, body, err
-}
-
-var clientURLResponseChPool sync.Pool
-
-func clientPostURL(dst []byte, url string, postArgs *Args, c clientDoer) (statusCode int, body []byte, err error) {
- req := AcquireRequest()
- req.Header.SetMethodBytes(strPost)
- req.Header.SetContentTypeBytes(strPostArgsContentType)
- if postArgs != nil {
- postArgs.WriteTo(req.BodyWriter())
- }
-
- statusCode, body, err = doRequestFollowRedirects(req, dst, url, c)
-
- ReleaseRequest(req)
- return statusCode, body, err
-}
-
-var (
- errMissingLocation = errors.New("missing Location header for http redirect")
- errTooManyRedirects = errors.New("too many redirects detected when doing the request")
-)
-
-const maxRedirectsCount = 16
-
-func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) {
- resp := AcquireResponse()
- bodyBuf := resp.bodyBuffer()
- resp.keepBodyBuffer = true
- oldBody := bodyBuf.B
- bodyBuf.B = dst
-
- redirectsCount := 0
- for {
- req.parsedURI = false
- req.Header.host = req.Header.host[:0]
- req.SetRequestURI(url)
-
- if err = c.Do(req, resp); err != nil {
- break
- }
- statusCode = resp.Header.StatusCode()
- if statusCode != StatusMovedPermanently && statusCode != StatusFound && statusCode != StatusSeeOther {
- break
- }
-
- redirectsCount++
- if redirectsCount > maxRedirectsCount {
- err = errTooManyRedirects
- break
- }
- location := resp.Header.peek(strLocation)
- if len(location) == 0 {
- err = errMissingLocation
- break
- }
- url = getRedirectURL(url, location)
- }
-
- body = bodyBuf.B
- bodyBuf.B = oldBody
- resp.keepBodyBuffer = false
- ReleaseResponse(resp)
-
- return statusCode, body, err
-}
-
-func getRedirectURL(baseURL string, location []byte) string {
- u := AcquireURI()
- u.Update(baseURL)
- u.UpdateBytes(location)
- redirectURL := u.String()
- ReleaseURI(u)
- return redirectURL
-}
-
-var (
- requestPool sync.Pool
- responsePool sync.Pool
-)
-
-// AcquireRequest returns an empty Request instance from request pool.
-//
-// The returned Request instance may be passed to ReleaseRequest when it is
-// no longer needed. This allows Request recycling, reduces GC pressure
-// and usually improves performance.
-func AcquireRequest() *Request {
- v := requestPool.Get()
- if v == nil {
- return &Request{}
- }
- return v.(*Request)
-}
-
-// ReleaseRequest returns req acquired via AcquireRequest to request pool.
-//
-// It is forbidden accessing req and/or its members after returning
-// it to request pool.
-func ReleaseRequest(req *Request) {
- req.Reset()
- requestPool.Put(req)
-}
-
-// AcquireResponse returns an empty Response instance from response pool.
-//
-// The returned Response instance may be passed to ReleaseResponse when it is
-// no longer needed. This allows Response recycling, reduces GC pressure
-// and usually improves performance.
-func AcquireResponse() *Response {
- v := responsePool.Get()
- if v == nil {
- return &Response{}
- }
- return v.(*Response)
-}
-
-// ReleaseResponse return resp acquired via AcquireResponse to response pool.
-//
-// It is forbidden accessing resp and/or its members after returning
-// it to response pool.
-func ReleaseResponse(resp *Response) {
- resp.Reset()
- responsePool.Put(resp)
-}
-
-// DoTimeout performs the given request and waits for response during
-// the given timeout duration.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned during
-// the given timeout.
-//
-// ErrNoFreeConns is returned if all HostClient.MaxConns connections
-// to the host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *HostClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
- return clientDoTimeout(req, resp, timeout, c)
-}
-
-// DoDeadline performs the given request and waits for response until
-// the given deadline.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned until
-// the given deadline.
-//
-// ErrNoFreeConns is returned if all HostClient.MaxConns connections
-// to the host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *HostClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- return clientDoDeadline(req, resp, deadline, c)
-}
-
-// DoCtx performs the given request and waits for response until
-// the given context is cancelled or deadline is reached.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned until
-// the deadline provided by the given context.
-//
-// ErrNoFreeConns is returned if all HostClient.MaxConns connections
-// to the host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *HostClient) DoCtx(ctx context.Context, req *Request, resp *Response) error {
- return clientDoCtx(ctx, req, resp, c)
-}
-
-func clientDoTimeout(req *Request, resp *Response, timeout time.Duration, c clientDoer) error {
- deadline := time.Now().Add(timeout)
- return clientDoDeadline(req, resp, deadline, c)
-}
-
-func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c clientDoer) error {
- ctx, cancel := context.WithDeadline(context.Background(), deadline)
- defer cancel()
- return clientDoCtx(ctx, req, resp, c)
-}
-
-func clientDoCtx(ctx context.Context, req *Request, resp *Response, c clientDoer) error {
- var ch chan error
- chv := errorChPool.Get()
- if chv == nil {
- chv = make(chan error, 1)
- }
- ch = chv.(chan error)
-
- // Make req and resp copies, since they may no longer be accessed
- // after a timeout.
- reqCopy := AcquireRequest()
- req.CopyTo(reqCopy)
- respCopy := AcquireResponse()
- if resp != nil {
- swapResponseBody(resp, respCopy)
- }
-
- // Note that the request continues executing after ErrTimeout until
- // the client-specific ReadTimeout is exceeded. This limits the load
- // on slow hosts to at most MaxConns* concurrent requests.
- //
- // Without this 'hack' the load on a slow host could exceed MaxConns*
- // concurrent requests, since requests that time out on the client side
- // usually keep executing on the host.
- go func() {
- ch <- c.Do(reqCopy, respCopy)
- }()
-
- var err error
- select {
- case err = <-ch:
- if resp != nil {
- respCopy.copyToSkipBody(resp)
- swapResponseBody(resp, respCopy)
- }
- ReleaseResponse(respCopy)
- ReleaseRequest(reqCopy)
- errorChPool.Put(chv)
- case <-ctx.Done():
- err = ctx.Err()
- if errors.Is(err, context.DeadlineExceeded) {
- err = ErrTimeout
- }
- }
-
- return err
-}
-
-var errorChPool sync.Pool
-
-// Do performs the given http request and sets the corresponding response.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrNoFreeConns is returned if all HostClient.MaxConns connections
-// to the host are busy.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *HostClient) Do(req *Request, resp *Response) error {
- var err error
- var retry bool
- maxAttempts := c.MaxIdempotentRequestAttempts
- if maxAttempts <= 0 {
- maxAttempts = 5
- }
- attempts := 0
-
- atomic.AddUint64(&c.pendingRequests, 1)
- for {
- retry, err = c.do(req, resp)
- if err == nil || !retry {
- break
- }
-
- if !isIdempotent(req) {
- // Retry non-idempotent requests if the server closes
- // the connection before sending the response.
- //
- // This case is possible if the server closes the idle
- // keep-alive connection on timeout.
- //
- // Apache and nginx usually do this.
- if err != io.EOF {
- break
- }
- }
- attempts++
- if attempts >= maxAttempts {
- break
- }
- }
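- // Adding ^uint64(0) (i.e. -1 in two's complement) atomically
- // decrements pendingRequests.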
- atomic.AddUint64(&c.pendingRequests, ^uint64(0))
-
- if err == io.EOF {
- err = ErrConnectionClosed
- }
- return err
-}
-
-// PendingRequests returns the current number of requests the client
-// is executing.
-//
-// This function may be used for balancing load among multiple HostClient
-// instances.
-func (c *HostClient) PendingRequests() int {
- return int(atomic.LoadUint64(&c.pendingRequests))
-}
-
-func isIdempotent(req *Request) bool {
- return req.Header.IsGet() || req.Header.IsHead() || req.Header.IsPut()
-}
-
-func (c *HostClient) do(req *Request, resp *Response) (bool, error) {
- nilResp := false
- if resp == nil {
- nilResp = true
- resp = AcquireResponse()
- }
-
- ok, err := c.doNonNilReqResp(req, resp)
-
- if nilResp {
- ReleaseResponse(resp)
- }
-
- return ok, err
-}
-
-func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) {
- if req == nil {
- panic("BUG: req cannot be nil")
- }
- if resp == nil {
- panic("BUG: resp cannot be nil")
- }
-
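- // Store seconds elapsed since process start, so the timestamp fits
- // into a uint32 for cheap atomic access (see LastUseTime).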
- atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix))
-
- // Free up resources occupied by response before sending the request,
- // so the GC may reclaim these resources (e.g. response body).
- resp.Reset()
-
- cc, err := c.acquireConn()
- if err != nil {
- return false, err
- }
- conn := cc.c
-
- if c.WriteTimeout > 0 {
- // Optimization: update write deadline only if more than 25%
- // of the last write deadline exceeded.
- // See https://github.com/golang/go/issues/15133 for details.
- currentTime := time.Now()
- if currentTime.Sub(cc.lastWriteDeadlineTime) > (c.WriteTimeout >> 2) {
- if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil {
- c.closeConn(cc)
- return true, err
- }
- cc.lastWriteDeadlineTime = currentTime
- }
- }
-
- resetConnection := false
- if c.MaxConnDuration > 0 && time.Since(cc.createdTime) > c.MaxConnDuration && !req.ConnectionClose() {
- req.SetConnectionClose()
- resetConnection = true
- }
-
- userAgentOld := req.Header.UserAgent()
- if len(userAgentOld) == 0 {
- req.Header.userAgent = c.getClientName()
- }
- bw := c.acquireWriter(conn)
- err = req.Write(bw)
- if len(userAgentOld) == 0 {
- req.Header.userAgent = userAgentOld
- }
-
- if resetConnection {
- req.Header.ResetConnectionClose()
- }
-
- if err == nil {
- err = bw.Flush()
- }
- if err != nil {
- c.releaseWriter(bw)
- c.closeConn(cc)
- return true, err
- }
- c.releaseWriter(bw)
-
- if c.ReadTimeout > 0 {
- // Optimization: update read deadline only if more than 25%
- // of the last read deadline exceeded.
- // See https://github.com/golang/go/issues/15133 for details.
- currentTime := time.Now()
- if currentTime.Sub(cc.lastReadDeadlineTime) > (c.ReadTimeout >> 2) {
- if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil {
- c.closeConn(cc)
- return true, err
- }
- cc.lastReadDeadlineTime = currentTime
- }
- }
-
- if !req.Header.IsGet() && req.Header.IsHead() {
- resp.SkipBody = true
- }
-
- br := c.acquireReader(conn)
- if err = resp.ReadLimitBody(br, c.MaxResponseBodySize); err != nil {
- if err == io.EOF && time.Since(cc.createdTime) < time.Second {
- err = io.ErrUnexpectedEOF
- }
- c.releaseReader(br)
- c.closeConn(cc)
- return true, err
- }
- c.releaseReader(br)
-
- if resetConnection || req.ConnectionClose() || resp.ConnectionClose() {
- c.closeConn(cc)
- } else {
- c.releaseConn(cc)
- }
-
- return false, err
-}
-
-var (
- // ErrNoFreeConns is returned when no free connections are available
- // to the given host.
- //
- // Increase the allowed number of connections per host if you
- // see this error.
- ErrNoFreeConns = errors.New("no free connections available to host")
-
- // ErrTimeout is returned from timed out calls.
- ErrTimeout = errors.New("timeout")
-
- // ErrConnectionClosed may be returned from client methods if the server
- // closes the connection before returning the first response byte.
- //
- // If you see this error, then either fix the server by returning
- // 'Connection: close' response header before closing the connection
- // or add 'Connection: close' request header before sending requests
- // to the broken server.
- ErrConnectionClosed = errors.New("the server closed connection before returning the first response byte. " +
- "Make sure the server returns 'Connection: close' response header before closing the connection")
-)
-
-func (c *HostClient) acquireConn() (*clientConn, error) {
- var cc *clientConn
- createConn := false
- startCleaner := false
-
- var n int
- c.connsLock.Lock()
- n = len(c.conns)
- if n == 0 {
- maxConns := c.MaxConns
- if maxConns <= 0 {
- maxConns = DefaultMaxConnsPerHost
- }
- if c.connsCount < maxConns {
- c.connsCount++
- createConn = true
- if !c.connsCleanerRun {
- startCleaner = true
- c.connsCleanerRun = true
- }
- }
- } else {
- n--
- cc = c.conns[n]
- c.conns[n] = nil
- c.conns = c.conns[:n]
- }
- c.connsLock.Unlock()
-
- if cc != nil {
- return cc, nil
- }
- if !createConn {
- return nil, ErrNoFreeConns
- }
-
- if startCleaner {
- go c.connsCleaner()
- }
-
- conn, err := c.dialHostHard()
- if err != nil {
- c.decConnsCount()
- return nil, err
- }
- cc = acquireClientConn(conn)
-
- return cc, nil
-}
-
-func (c *HostClient) connsCleaner() {
- var (
- scratch []*clientConn
- maxIdleConnDuration = c.MaxIdleConnDuration
- )
- if maxIdleConnDuration <= 0 {
- maxIdleConnDuration = DefaultMaxIdleConnDuration
- }
- for {
- currentTime := time.Now()
-
- // Determine idle connections to be closed.
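- // releaseConn appends to c.conns, so connections are ordered by
- // lastUseTime and idle ones sit at the front of the slice.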
- c.connsLock.Lock()
- conns := c.conns
- n := len(conns)
- i := 0
- for i < n && currentTime.Sub(conns[i].lastUseTime) > maxIdleConnDuration {
- i++
- }
- scratch = append(scratch[:0], conns[:i]...)
- if i > 0 {
- m := copy(conns, conns[i:])
- for i = m; i < n; i++ {
- conns[i] = nil
- }
- c.conns = conns[:m]
- }
- c.connsLock.Unlock()
-
- // Close idle connections.
- for i, cc := range scratch {
- c.closeConn(cc)
- scratch[i] = nil
- }
-
- // Determine whether to stop the connsCleaner.
- c.connsLock.Lock()
- mustStop := c.connsCount == 0
- if mustStop {
- c.connsCleanerRun = false
- }
- c.connsLock.Unlock()
- if mustStop {
- break
- }
-
- time.Sleep(maxIdleConnDuration)
- }
-}
-
-func (c *HostClient) closeConn(cc *clientConn) {
- c.decConnsCount()
- cc.c.Close()
- releaseClientConn(cc)
-}
-
-func (c *HostClient) decConnsCount() {
- c.connsLock.Lock()
- c.connsCount--
- c.connsLock.Unlock()
-}
-
-func acquireClientConn(conn net.Conn) *clientConn {
- v := clientConnPool.Get()
- if v == nil {
- v = &clientConn{}
- }
- cc := v.(*clientConn)
- cc.c = conn
- cc.createdTime = time.Now()
- return cc
-}
-
-func releaseClientConn(cc *clientConn) {
- cc.reset()
- clientConnPool.Put(cc)
-}
-
-var clientConnPool sync.Pool
-
-func (c *HostClient) releaseConn(cc *clientConn) {
- cc.lastUseTime = time.Now()
- c.connsLock.Lock()
- c.conns = append(c.conns, cc)
- c.connsLock.Unlock()
-}
-
-func (c *HostClient) acquireWriter(conn net.Conn) *bufio.Writer {
- v := c.writerPool.Get()
- if v == nil {
- n := c.WriteBufferSize
- if n <= 0 {
- n = defaultWriteBufferSize
- }
- return bufio.NewWriterSize(conn, n)
- }
- bw := v.(*bufio.Writer)
- bw.Reset(conn)
- return bw
-}
-
-func (c *HostClient) releaseWriter(bw *bufio.Writer) {
- c.writerPool.Put(bw)
-}
-
-func (c *HostClient) acquireReader(conn net.Conn) *bufio.Reader {
- v := c.readerPool.Get()
- if v == nil {
- n := c.ReadBufferSize
- if n <= 0 {
- n = defaultReadBufferSize
- }
- return bufio.NewReaderSize(conn, n)
- }
- br := v.(*bufio.Reader)
- br.Reset(conn)
- return br
-}
-
-func (c *HostClient) releaseReader(br *bufio.Reader) {
- c.readerPool.Put(br)
-}
-
-func newClientTLSConfig(c *tls.Config, addr string) *tls.Config {
- if c == nil {
- c = &tls.Config{}
- } else {
- c = c.Clone()
- }
-
- if c.ClientSessionCache == nil {
- c.ClientSessionCache = tls.NewLRUClientSessionCache(0)
- }
-
- if len(c.ServerName) == 0 {
- serverName := tlsServerName(addr)
- if serverName == "*" {
- c.InsecureSkipVerify = true
- } else {
- c.ServerName = serverName
- }
- }
- return c
-}
-
-func tlsServerName(addr string) string {
- if !strings.Contains(addr, ":") {
- return addr
- }
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return "*"
- }
- return host
-}
-
-func (c *HostClient) nextAddr() string {
- c.addrsLock.Lock()
- if c.addrs == nil {
- c.addrs = strings.Split(c.Addr, ",")
- }
- addr := c.addrs[0]
- if len(c.addrs) > 1 {
- addr = c.addrs[c.addrIdx%uint32(len(c.addrs))]
- c.addrIdx++
- }
- c.addrsLock.Unlock()
- return addr
-}
-
-func (c *HostClient) dialHostHard() (conn net.Conn, err error) {
- // attempt to dial all the available hosts before giving up.
-
- c.addrsLock.Lock()
- n := len(c.addrs)
- c.addrsLock.Unlock()
-
- if n == 0 {
- // It looks like c.addrs isn't initialized yet.
- n = 1
- }
-
- timeout := c.ReadTimeout + c.WriteTimeout
- if timeout <= 0 {
- timeout = DefaultDialTimeout
- }
- deadline := time.Now().Add(timeout)
- for n > 0 {
- addr := c.nextAddr()
- tlsConfig := c.cachedTLSConfig(addr)
- conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig)
- if err == nil {
- return conn, nil
- }
- if time.Since(deadline) >= 0 {
- break
- }
- n--
- }
- return nil, err
-}
-
-func (c *HostClient) cachedTLSConfig(addr string) *tls.Config {
- if !c.IsTLS {
- return nil
- }
-
- c.tlsConfigMapLock.Lock()
- if c.tlsConfigMap == nil {
- c.tlsConfigMap = make(map[string]*tls.Config)
- }
- cfg := c.tlsConfigMap[addr]
- if cfg == nil {
- cfg = newClientTLSConfig(c.TLSConfig, addr)
- c.tlsConfigMap[addr] = cfg
- }
- c.tlsConfigMapLock.Unlock()
-
- return cfg
-}
-
-func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config) (net.Conn, error) {
- if dial == nil {
- if dialDualStack {
- dial = DialDualStack
- } else {
- dial = Dial
- }
- addr = addMissingPort(addr, isTLS)
- }
- conn, err := dial(addr)
- if err != nil {
- return nil, err
- }
- if conn == nil {
- panic("BUG: DialFunc returned (nil, nil)")
- }
- if isTLS {
- conn = tls.Client(conn, tlsConfig)
- }
- return conn, nil
-}
-
-func (c *HostClient) getClientName() []byte {
- v := c.clientName.Load()
- var clientName []byte
- if v == nil {
- clientName = []byte(c.Name)
- if len(clientName) == 0 {
- clientName = defaultUserAgent
- }
- c.clientName.Store(clientName)
- } else {
- clientName = v.([]byte)
- }
- return clientName
-}
-
-func addMissingPort(addr string, isTLS bool) string {
- n := strings.Index(addr, ":")
- if n >= 0 {
- return addr
- }
- port := 80
- if isTLS {
- port = 443
- }
- return fmt.Sprintf("%s:%d", addr, port)
-}
-
-// PipelineClient pipelines requests over a limited set of concurrent
-// connections to the given Addr.
-//
-// This client may be used in highly loaded HTTP-based RPC systems for reducing
-// context switches and network level overhead.
-// See https://en.wikipedia.org/wiki/HTTP_pipelining for details.
-//
-// It is forbidden copying PipelineClient instances. Create new instances
-// instead.
-//
-// It is safe calling PipelineClient methods from concurrently running
-// goroutines.
-type PipelineClient struct {
- noCopy noCopy
-
- // Address of the host to connect to.
- Addr string
-
- // The maximum number of concurrent connections to the Addr.
- //
-	// A single connection is used by default.
- MaxConns int
-
- // The maximum number of pending pipelined requests over
- // a single connection to Addr.
- //
- // DefaultMaxPendingRequests is used by default.
- MaxPendingRequests int
-
- // The maximum delay before sending pipelined requests as a batch
- // to the server.
- //
- // By default requests are sent immediately to the server.
- MaxBatchDelay time.Duration
-
- // Callback for connection establishing to the host.
- //
- // Default Dial is used if not set.
- Dial DialFunc
-
- // Attempt to connect to both ipv4 and ipv6 host addresses
- // if set to true.
- //
- // This option is used only if default TCP dialer is used,
- // i.e. if Dial is blank.
- //
- // By default client connects only to ipv4 addresses,
- // since unfortunately ipv6 remains broken in many networks worldwide :)
- DialDualStack bool
-
- // Whether to use TLS (aka SSL or HTTPS) for host connections.
- IsTLS bool
-
- // Optional TLS config.
- TLSConfig *tls.Config
-
- // Idle connection to the host is closed after this duration.
- //
- // By default idle connection is closed after
- // DefaultMaxIdleConnDuration.
- MaxIdleConnDuration time.Duration
-
- // Buffer size for responses' reading.
- // This also limits the maximum header size.
- //
- // Default buffer size is used if 0.
- ReadBufferSize int
-
- // Buffer size for requests' writing.
- //
- // Default buffer size is used if 0.
- WriteBufferSize int
-
- // Maximum duration for full response reading (including body).
- //
- // By default response read timeout is unlimited.
- ReadTimeout time.Duration
-
- // Maximum duration for full request writing (including body).
- //
- // By default request write timeout is unlimited.
- WriteTimeout time.Duration
-
- // Logger for logging client errors.
- //
- // By default standard logger from log package is used.
- Logger Logger
-
- connClients []*pipelineConnClient
- connClientsLock sync.Mutex
-}
-
-type pipelineConnClient struct {
- noCopy noCopy
-
- Addr string
- MaxPendingRequests int
- MaxBatchDelay time.Duration
- Dial DialFunc
- DialDualStack bool
- IsTLS bool
- TLSConfig *tls.Config
- MaxIdleConnDuration time.Duration
- ReadBufferSize int
- WriteBufferSize int
- ReadTimeout time.Duration
- WriteTimeout time.Duration
- Logger Logger
-
- workPool sync.Pool
-
- chLock sync.Mutex
- chW chan *pipelineWork
- chR chan *pipelineWork
-
- tlsConfigLock sync.Mutex
- tlsConfig *tls.Config
-}
-
-type pipelineWork struct {
- reqCopy Request
- respCopy Response
- req *Request
- resp *Response
- t *time.Timer
- deadline time.Time
- err error
- done chan struct{}
-}
-
-// DoTimeout performs the given request and waits for response during
-// the given timeout duration.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned during
-// the given timeout.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *PipelineClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
- return c.DoDeadline(req, resp, time.Now().Add(timeout))
-}
-
-// DoDeadline performs the given request and waits for response until
-// the given deadline.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects.
-//
-// Response is ignored if resp is nil.
-//
-// ErrTimeout is returned if the response wasn't returned until
-// the given deadline.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *PipelineClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- return c.getConnClient().DoDeadline(req, resp, deadline)
-}
-
-func (c *pipelineConnClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- c.init()
-
- timeout := -time.Since(deadline)
- if timeout < 0 {
- return ErrTimeout
- }
-
- w := acquirePipelineWork(&c.workPool, timeout)
- w.req = &w.reqCopy
- w.resp = &w.respCopy
-
- // Make a copy of the request in order to avoid data races on timeouts
- req.copyToSkipBody(&w.reqCopy)
- swapRequestBody(req, &w.reqCopy)
-
- // Put the request to outgoing queue
- select {
- case c.chW <- w:
- // Fast path: len(c.ch) < cap(c.ch)
- default:
- // Slow path
- select {
- case c.chW <- w:
- case <-w.t.C:
- releasePipelineWork(&c.workPool, w)
- return ErrTimeout
- }
- }
-
- // Wait for the response
- var err error
- select {
- case <-w.done:
- if resp != nil {
- w.respCopy.copyToSkipBody(resp)
- swapResponseBody(resp, &w.respCopy)
- }
- err = w.err
- releasePipelineWork(&c.workPool, w)
- case <-w.t.C:
- err = ErrTimeout
- }
-
- return err
-}
-
-// Do performs the given http request and sets the corresponding response.
-//
-// Request must contain at least non-zero RequestURI with full url (including
-// scheme and host) or non-zero Host header + RequestURI.
-//
-// The function doesn't follow redirects. Use Get* for following redirects.
-//
-// Response is ignored if resp is nil.
-//
-// It is recommended obtaining req and resp via AcquireRequest
-// and AcquireResponse in performance-critical code.
-func (c *PipelineClient) Do(req *Request, resp *Response) error {
- return c.getConnClient().Do(req, resp)
-}
-
-func (c *pipelineConnClient) Do(req *Request, resp *Response) error {
- c.init()
-
- w := acquirePipelineWork(&c.workPool, 0)
- w.req = req
- if resp != nil {
- w.resp = resp
- } else {
- w.resp = &w.respCopy
- }
-
- // Put the request to outgoing queue
- select {
- case c.chW <- w:
- default:
- // Try substituting the oldest w with the current one.
- select {
- case wOld := <-c.chW:
- wOld.err = ErrPipelineOverflow
- wOld.done <- struct{}{}
- default:
- }
- select {
- case c.chW <- w:
- default:
- releasePipelineWork(&c.workPool, w)
- return ErrPipelineOverflow
- }
- }
-
- // Wait for the response
- <-w.done
- err := w.err
-
- releasePipelineWork(&c.workPool, w)
-
- return err
-}
-
-func (c *PipelineClient) getConnClient() *pipelineConnClient {
- c.connClientsLock.Lock()
- cc := c.getConnClientUnlocked()
- c.connClientsLock.Unlock()
- return cc
-}
-
-func (c *PipelineClient) getConnClientUnlocked() *pipelineConnClient {
- if len(c.connClients) == 0 {
- return c.newConnClient()
- }
-
- // Return the client with the minimum number of pending requests.
- minCC := c.connClients[0]
- minReqs := minCC.PendingRequests()
- if minReqs == 0 {
- return minCC
- }
- for i := 1; i < len(c.connClients); i++ {
- cc := c.connClients[i]
- reqs := cc.PendingRequests()
- if reqs == 0 {
- return cc
- }
- if reqs < minReqs {
- minCC = cc
- minReqs = reqs
- }
- }
-
- maxConns := c.MaxConns
- if maxConns <= 0 {
- maxConns = 1
- }
- if len(c.connClients) < maxConns {
- return c.newConnClient()
- }
- return minCC
-}
-
-func (c *PipelineClient) newConnClient() *pipelineConnClient {
- cc := &pipelineConnClient{
- Addr: c.Addr,
- MaxPendingRequests: c.MaxPendingRequests,
- MaxBatchDelay: c.MaxBatchDelay,
- Dial: c.Dial,
- DialDualStack: c.DialDualStack,
- IsTLS: c.IsTLS,
- TLSConfig: c.TLSConfig,
- MaxIdleConnDuration: c.MaxIdleConnDuration,
- ReadBufferSize: c.ReadBufferSize,
- WriteBufferSize: c.WriteBufferSize,
- ReadTimeout: c.ReadTimeout,
- WriteTimeout: c.WriteTimeout,
- Logger: c.Logger,
- }
- c.connClients = append(c.connClients, cc)
- return cc
-}
-
-// ErrPipelineOverflow may be returned from PipelineClient.Do*
-// if the requests' queue overflows.
-var ErrPipelineOverflow = errors.New("pipelined requests' queue has been overflown. Increase MaxConns and/or MaxPendingRequests")
-
-// DefaultMaxPendingRequests is the default value
-// for PipelineClient.MaxPendingRequests.
-const DefaultMaxPendingRequests = 1024
-
-func (c *pipelineConnClient) init() {
- c.chLock.Lock()
- if c.chR == nil {
- maxPendingRequests := c.MaxPendingRequests
- if maxPendingRequests <= 0 {
- maxPendingRequests = DefaultMaxPendingRequests
- }
- c.chR = make(chan *pipelineWork, maxPendingRequests)
- if c.chW == nil {
- c.chW = make(chan *pipelineWork, maxPendingRequests)
- }
- go func() {
- if err := c.worker(); err != nil {
- c.logger().Printf("error in PipelineClient(%q): %s", c.Addr, err)
- if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
- // Throttle client reconnections on temporary errors
- time.Sleep(time.Second)
- }
- }
-
- c.chLock.Lock()
- // Do not reset c.chW to nil, since it may contain
- // pending requests, which could be served on the next
- // connection to the host.
- c.chR = nil
- c.chLock.Unlock()
- }()
- }
- c.chLock.Unlock()
-}
-
-func (c *pipelineConnClient) worker() error {
- tlsConfig := c.cachedTLSConfig()
- conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig)
- if err != nil {
- return err
- }
-
- // Start reader and writer
- stopW := make(chan struct{})
- doneW := make(chan error)
- go func() {
- doneW <- c.writer(conn, stopW)
- }()
- stopR := make(chan struct{})
- doneR := make(chan error)
- go func() {
- doneR <- c.reader(conn, stopR)
- }()
-
- // Wait until reader and writer are stopped
- select {
- case err = <-doneW:
- conn.Close()
- close(stopR)
- <-doneR
- case err = <-doneR:
- conn.Close()
- close(stopW)
- <-doneW
- }
-
- // Notify pending readers
- for len(c.chR) > 0 {
- w := <-c.chR
- w.err = errPipelineConnStopped
- w.done <- struct{}{}
- }
-
- return err
-}
-
-func (c *pipelineConnClient) cachedTLSConfig() *tls.Config {
- if !c.IsTLS {
- return nil
- }
-
- c.tlsConfigLock.Lock()
- cfg := c.tlsConfig
- if cfg == nil {
- cfg = newClientTLSConfig(c.TLSConfig, c.Addr)
- c.tlsConfig = cfg
- }
- c.tlsConfigLock.Unlock()
-
- return cfg
-}
-
-func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error {
- writeBufferSize := c.WriteBufferSize
- if writeBufferSize <= 0 {
- writeBufferSize = defaultWriteBufferSize
- }
- bw := bufio.NewWriterSize(conn, writeBufferSize)
- defer bw.Flush()
- chR := c.chR
- chW := c.chW
- writeTimeout := c.WriteTimeout
-
- maxIdleConnDuration := c.MaxIdleConnDuration
- if maxIdleConnDuration <= 0 {
- maxIdleConnDuration = DefaultMaxIdleConnDuration
- }
- maxBatchDelay := c.MaxBatchDelay
-
- var (
- stopTimer = time.NewTimer(time.Hour)
- flushTimer = time.NewTimer(time.Hour)
- flushTimerCh <-chan time.Time
- instantTimerCh = make(chan time.Time)
-
- w *pipelineWork
- err error
-
- lastWriteDeadlineTime time.Time
- )
- close(instantTimerCh)
- for {
- againChW:
- select {
- case w = <-chW:
- // Fast path: len(chW) > 0
- default:
- // Slow path
- stopTimer.Reset(maxIdleConnDuration)
- select {
- case w = <-chW:
- case <-stopTimer.C:
- return nil
- case <-stopCh:
- return nil
- case <-flushTimerCh:
- if err = bw.Flush(); err != nil {
- return err
- }
- flushTimerCh = nil
- goto againChW
- }
- }
-
- if !w.deadline.IsZero() && time.Since(w.deadline) >= 0 {
- w.err = ErrTimeout
- w.done <- struct{}{}
- continue
- }
-
- if writeTimeout > 0 {
-			// Optimization: update the write deadline only if more than 25%
-			// of the write timeout has passed since it was last set.
- // See https://github.com/golang/go/issues/15133 for details.
- currentTime := time.Now()
- if currentTime.Sub(lastWriteDeadlineTime) > (writeTimeout >> 2) {
- if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
- w.err = err
- w.done <- struct{}{}
- return err
- }
- lastWriteDeadlineTime = currentTime
- }
- }
- if err = w.req.Write(bw); err != nil {
- w.err = err
- w.done <- struct{}{}
- return err
- }
- if flushTimerCh == nil && (len(chW) == 0 || len(chR) == cap(chR)) {
- if maxBatchDelay > 0 {
- flushTimer.Reset(maxBatchDelay)
- flushTimerCh = flushTimer.C
- } else {
- flushTimerCh = instantTimerCh
- }
- }
-
- againChR:
- select {
- case chR <- w:
- // Fast path: len(chR) < cap(chR)
- default:
- // Slow path
- select {
- case chR <- w:
- case <-stopCh:
- w.err = errPipelineConnStopped
- w.done <- struct{}{}
- return nil
- case <-flushTimerCh:
- if err = bw.Flush(); err != nil {
- w.err = err
- w.done <- struct{}{}
- return err
- }
- flushTimerCh = nil
- goto againChR
- }
- }
- }
-}
-
-func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error {
- readBufferSize := c.ReadBufferSize
- if readBufferSize <= 0 {
- readBufferSize = defaultReadBufferSize
- }
- br := bufio.NewReaderSize(conn, readBufferSize)
- chR := c.chR
- readTimeout := c.ReadTimeout
-
- var (
- w *pipelineWork
- err error
-
- lastReadDeadlineTime time.Time
- )
- for {
- select {
- case w = <-chR:
- // Fast path: len(chR) > 0
- default:
- // Slow path
- select {
- case w = <-chR:
- case <-stopCh:
- return nil
- }
- }
-
- if readTimeout > 0 {
-			// Optimization: update the read deadline only if more than 25%
-			// of the read timeout has passed since it was last set.
- // See https://github.com/golang/go/issues/15133 for details.
- currentTime := time.Now()
- if currentTime.Sub(lastReadDeadlineTime) > (readTimeout >> 2) {
- if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil {
- w.err = err
- w.done <- struct{}{}
- return err
- }
- lastReadDeadlineTime = currentTime
- }
- }
- if err = w.resp.Read(br); err != nil {
- w.err = err
- w.done <- struct{}{}
- return err
- }
-
- w.done <- struct{}{}
- }
-}
-
-func (c *pipelineConnClient) logger() Logger {
- if c.Logger != nil {
- return c.Logger
- }
- return defaultLogger
-}
-
-// PendingRequests returns the current number of pending requests pipelined
-// to the server.
-//
-// This number may exceed MaxPendingRequests*MaxConns by up to two times, since
-// each connection to the server may keep up to MaxPendingRequests requests
-// in the queue before sending them to the server.
-//
-// This function may be used for balancing load among multiple PipelineClient
-// instances.
-func (c *PipelineClient) PendingRequests() int {
- c.connClientsLock.Lock()
- n := 0
- for _, cc := range c.connClients {
- n += cc.PendingRequests()
- }
- c.connClientsLock.Unlock()
- return n
-}
-
-func (c *pipelineConnClient) PendingRequests() int {
- c.init()
-
- c.chLock.Lock()
- n := len(c.chR) + len(c.chW)
- c.chLock.Unlock()
- return n
-}
-
-var errPipelineConnStopped = errors.New("pipeline connection has been stopped")
-
-func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork {
- v := pool.Get()
- if v == nil {
- v = &pipelineWork{
- done: make(chan struct{}, 1),
- }
- }
- w := v.(*pipelineWork)
- if timeout > 0 {
- if w.t == nil {
- w.t = time.NewTimer(timeout)
- } else {
- w.t.Reset(timeout)
- }
- w.deadline = time.Now().Add(timeout)
- } else {
- w.deadline = zeroTime
- }
- return w
-}
-
-func releasePipelineWork(pool *sync.Pool, w *pipelineWork) {
- if w.t != nil {
- w.t.Stop()
- }
- w.reqCopy.Reset()
- w.respCopy.Reset()
- w.req = nil
- w.resp = nil
- w.err = nil
- pool.Put(w)
-}
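
For reference, here is a minimal sketch of how the `PipelineClient` removed above was typically driven. The address and endpoint are hypothetical, and the `Release*` counterparts of `AcquireRequest`/`AcquireResponse` are assumed to be the usual pooled helpers that the deleted doc comments recommend for performance-critical code.

```go
package main

import (
	"fmt"
	"log"
	"time"

	fasthttp "github.com/VictoriaMetrics/fasthttp"
)

func main() {
	// Pipeline up to 128 requests over at most 2 connections,
	// batching writes for up to 5ms before flushing.
	pc := &fasthttp.PipelineClient{
		Addr:               "127.0.0.1:8080", // hypothetical test server
		MaxConns:           2,
		MaxPendingRequests: 128,
		MaxBatchDelay:      5 * time.Millisecond,
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	// PipelineClient requires a full URL (or Host header + RequestURI).
	req.SetRequestURI("http://127.0.0.1:8080/ping")
	if err := pc.DoTimeout(req, resp, time.Second); err != nil {
		log.Fatalf("pipelined request failed: %s", err)
	}
	fmt.Printf("status=%d\n", resp.StatusCode())
}
```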
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/compress.go b/vendor/github.com/VictoriaMetrics/fasthttp/compress.go
deleted file mode 100644
index c37a807ea..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/compress.go
+++ /dev/null
@@ -1,440 +0,0 @@
-package fasthttp
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "sync"
-
- "github.com/VictoriaMetrics/fasthttp/stackless"
- "github.com/klauspost/compress/flate"
- "github.com/klauspost/compress/gzip"
- "github.com/klauspost/compress/zlib"
- "github.com/valyala/bytebufferpool"
-)
-
-// Supported compression levels.
-const (
- CompressNoCompression = flate.NoCompression
- CompressBestSpeed = flate.BestSpeed
- CompressBestCompression = flate.BestCompression
- CompressDefaultCompression = 6 // flate.DefaultCompression
- CompressHuffmanOnly = -2 // flate.HuffmanOnly
-)
-
-func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
- v := gzipReaderPool.Get()
- if v == nil {
- return gzip.NewReader(r)
- }
- zr := v.(*gzip.Reader)
- if err := zr.Reset(r); err != nil {
- return nil, err
- }
- return zr, nil
-}
-
-func releaseGzipReader(zr *gzip.Reader) {
- zr.Close()
- gzipReaderPool.Put(zr)
-}
-
-var gzipReaderPool sync.Pool
-
-func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
- v := flateReaderPool.Get()
- if v == nil {
- zr, err := zlib.NewReader(r)
- if err != nil {
- return nil, err
- }
- return zr, nil
- }
- zr := v.(io.ReadCloser)
- if err := resetFlateReader(zr, r); err != nil {
- return nil, err
- }
- return zr, nil
-}
-
-func releaseFlateReader(zr io.ReadCloser) {
- zr.Close()
- flateReaderPool.Put(zr)
-}
-
-func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
- zrr, ok := zr.(zlib.Resetter)
- if !ok {
- panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
- }
- return zrr.Reset(r, nil)
-}
-
-var flateReaderPool sync.Pool
-
-func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
- nLevel := normalizeCompressLevel(level)
- p := stacklessGzipWriterPoolMap[nLevel]
- v := p.Get()
- if v == nil {
- return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
- return acquireRealGzipWriter(w, level)
- })
- }
- sw := v.(stackless.Writer)
- sw.Reset(w)
- return sw
-}
-
-func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
- sw.Close()
- nLevel := normalizeCompressLevel(level)
- p := stacklessGzipWriterPoolMap[nLevel]
- p.Put(sw)
-}
-
-func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
- nLevel := normalizeCompressLevel(level)
- p := realGzipWriterPoolMap[nLevel]
- v := p.Get()
- if v == nil {
- zw, err := gzip.NewWriterLevel(w, level)
- if err != nil {
- panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
- }
- return zw
- }
- zw := v.(*gzip.Writer)
- zw.Reset(w)
- return zw
-}
-
-func releaseRealGzipWriter(zw *gzip.Writer, level int) {
- zw.Close()
- nLevel := normalizeCompressLevel(level)
- p := realGzipWriterPoolMap[nLevel]
- p.Put(zw)
-}
-
-var (
- stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
- realGzipWriterPoolMap = newCompressWriterPoolMap()
-)
-
-// AppendGzipBytesLevel appends gzipped src to dst using the given
-// compression level and returns the resulting dst.
-//
-// Supported compression levels are:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
- w := &byteSliceWriter{dst}
- WriteGzipLevel(w, src, level)
- return w.b
-}
-
-// WriteGzipLevel writes gzipped p to w using the given compression level
-// and returns the number of compressed bytes written to w.
-//
-// Supported compression levels are:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
- switch w.(type) {
- case *byteSliceWriter,
- *bytes.Buffer,
- *ByteBuffer,
- *bytebufferpool.ByteBuffer:
- // These writers don't block, so we can just use stacklessWriteGzip
- ctx := &compressCtx{
- w: w,
- p: p,
- level: level,
- }
- stacklessWriteGzip(ctx)
- return len(p), nil
- default:
- zw := acquireStacklessGzipWriter(w, level)
- n, err := zw.Write(p)
- releaseStacklessGzipWriter(zw, level)
- return n, err
- }
-}
-
-var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
-
-func nonblockingWriteGzip(ctxv interface{}) {
- ctx := ctxv.(*compressCtx)
- zw := acquireRealGzipWriter(ctx.w, ctx.level)
-
- _, err := zw.Write(ctx.p)
- if err != nil {
- panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
- }
-
- releaseRealGzipWriter(zw, ctx.level)
-}
-
-// WriteGzip writes gzipped p to w and returns the number of compressed
-// bytes written to w.
-func WriteGzip(w io.Writer, p []byte) (int, error) {
- return WriteGzipLevel(w, p, CompressDefaultCompression)
-}
-
-// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
-func AppendGzipBytes(dst, src []byte) []byte {
- return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
-}
-
-// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
-// bytes written to w.
-func WriteGunzip(w io.Writer, p []byte) (int, error) {
- r := &byteSliceReader{p}
- zr, err := acquireGzipReader(r)
- if err != nil {
- return 0, err
- }
- n, err := copyZeroAlloc(w, zr)
- releaseGzipReader(zr)
- nn := int(n)
- if int64(nn) != n {
- return 0, fmt.Errorf("too much data gunzipped: %d", n)
- }
- return nn, err
-}
-
-// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
-func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
- w := &byteSliceWriter{dst}
- _, err := WriteGunzip(w, src)
- return w.b, err
-}
-
-// AppendDeflateBytesLevel appends deflated src to dst using the given
-// compression level and returns the resulting dst.
-//
-// Supported compression levels are:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
- w := &byteSliceWriter{dst}
- WriteDeflateLevel(w, src, level)
- return w.b
-}
-
-// WriteDeflateLevel writes deflated p to w using the given compression level
-// and returns the number of compressed bytes written to w.
-//
-// Supported compression levels are:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
- switch w.(type) {
- case *byteSliceWriter,
- *bytes.Buffer,
- *ByteBuffer,
- *bytebufferpool.ByteBuffer:
- // These writers don't block, so we can just use stacklessWriteDeflate
- ctx := &compressCtx{
- w: w,
- p: p,
- level: level,
- }
- stacklessWriteDeflate(ctx)
- return len(p), nil
- default:
- zw := acquireStacklessDeflateWriter(w, level)
- n, err := zw.Write(p)
- releaseStacklessDeflateWriter(zw, level)
- return n, err
- }
-}
-
-var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
-
-func nonblockingWriteDeflate(ctxv interface{}) {
- ctx := ctxv.(*compressCtx)
- zw := acquireRealDeflateWriter(ctx.w, ctx.level)
-
- _, err := zw.Write(ctx.p)
- if err != nil {
- panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
- }
-
- releaseRealDeflateWriter(zw, ctx.level)
-}
-
-type compressCtx struct {
- w io.Writer
- p []byte
- level int
-}
-
-// WriteDeflate writes deflated p to w and returns the number of compressed
-// bytes written to w.
-func WriteDeflate(w io.Writer, p []byte) (int, error) {
- return WriteDeflateLevel(w, p, CompressDefaultCompression)
-}
-
-// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
-func AppendDeflateBytes(dst, src []byte) []byte {
- return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
-}
-
-// WriteInflate writes inflated p to w and returns the number of uncompressed
-// bytes written to w.
-func WriteInflate(w io.Writer, p []byte) (int, error) {
- r := &byteSliceReader{p}
- zr, err := acquireFlateReader(r)
- if err != nil {
- return 0, err
- }
- n, err := copyZeroAlloc(w, zr)
- releaseFlateReader(zr)
- nn := int(n)
- if int64(nn) != n {
- return 0, fmt.Errorf("too much data inflated: %d", n)
- }
- return nn, err
-}
-
-// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
-func AppendInflateBytes(dst, src []byte) ([]byte, error) {
- w := &byteSliceWriter{dst}
- _, err := WriteInflate(w, src)
- return w.b, err
-}
-
-type byteSliceWriter struct {
- b []byte
-}
-
-func (w *byteSliceWriter) Write(p []byte) (int, error) {
- w.b = append(w.b, p...)
- return len(p), nil
-}
-
-type byteSliceReader struct {
- b []byte
-}
-
-func (r *byteSliceReader) Read(p []byte) (int, error) {
- if len(r.b) == 0 {
- return 0, io.EOF
- }
- n := copy(p, r.b)
- r.b = r.b[n:]
- return n, nil
-}
-
-func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
- nLevel := normalizeCompressLevel(level)
- p := stacklessDeflateWriterPoolMap[nLevel]
- v := p.Get()
- if v == nil {
- return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
- return acquireRealDeflateWriter(w, level)
- })
- }
- sw := v.(stackless.Writer)
- sw.Reset(w)
- return sw
-}
-
-func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
- sw.Close()
- nLevel := normalizeCompressLevel(level)
- p := stacklessDeflateWriterPoolMap[nLevel]
- p.Put(sw)
-}
-
-func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
- nLevel := normalizeCompressLevel(level)
- p := realDeflateWriterPoolMap[nLevel]
- v := p.Get()
- if v == nil {
- zw, err := zlib.NewWriterLevel(w, level)
- if err != nil {
- panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
- }
- return zw
- }
- zw := v.(*zlib.Writer)
- zw.Reset(w)
- return zw
-}
-
-func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
- zw.Close()
- nLevel := normalizeCompressLevel(level)
- p := realDeflateWriterPoolMap[nLevel]
- p.Put(zw)
-}
-
-var (
- stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
- realDeflateWriterPoolMap = newCompressWriterPoolMap()
-)
-
-func newCompressWriterPoolMap() []*sync.Pool {
- // Initialize pools for all the compression levels defined
- // in https://golang.org/pkg/compress/flate/#pkg-constants .
- // Compression levels are normalized with normalizeCompressLevel,
-	// so they fit in [0..11].
- var m []*sync.Pool
- for i := 0; i < 12; i++ {
- m = append(m, &sync.Pool{})
- }
- return m
-}
-
-func isFileCompressible(f *os.File, minCompressRatio float64) bool {
-	// Try compressing the first 4KB of the file
- // and see if it can be compressed by more than
- // the given minCompressRatio.
- b := AcquireByteBuffer()
- zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
- lr := &io.LimitedReader{
- R: f,
- N: 4096,
- }
- _, err := copyZeroAlloc(zw, lr)
- releaseStacklessGzipWriter(zw, CompressDefaultCompression)
- f.Seek(0, 0)
- if err != nil {
- return false
- }
-
- n := 4096 - lr.N
- zn := len(b.B)
- ReleaseByteBuffer(b)
- return float64(zn) < float64(n)*minCompressRatio
-}
-
-// normalizes compression level into [0..11], so it could be used as an index
-// in *PoolMap.
-func normalizeCompressLevel(level int) int {
- // -2 is the lowest compression level - CompressHuffmanOnly
- // 9 is the highest compression level - CompressBestCompression
- if level < -2 || level > 9 {
- level = CompressDefaultCompression
- }
- return level + 2
-}
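
A quick sketch of the gzip round trip that this deleted compress.go provided, using `AppendGzipBytesLevel` and `AppendGunzipBytes` exactly as declared above; the payload is made up.

```go
package main

import (
	"bytes"
	"fmt"

	fasthttp "github.com/VictoriaMetrics/fasthttp"
)

func main() {
	src := bytes.Repeat([]byte("payload "), 64) // made-up compressible input

	// Compress into a fresh buffer; a nil dst is grown as needed.
	gz := fasthttp.AppendGzipBytesLevel(nil, src, fasthttp.CompressBestSpeed)

	// Decompress and verify the round trip.
	plain, err := fasthttp.AppendGunzipBytes(nil, gz)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d -> %d bytes, roundtrip ok: %v\n",
		len(src), len(gz), bytes.Equal(plain, src))
}
```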
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/cookie.go b/vendor/github.com/VictoriaMetrics/fasthttp/cookie.go
deleted file mode 100644
index 17ecc626d..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/cookie.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package fasthttp
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
- "time"
-)
-
-var zeroTime time.Time
-
-var (
- // CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie.
- CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-
- // CookieExpireUnlimited indicates that the cookie doesn't expire.
- CookieExpireUnlimited = zeroTime
-)
-
-// AcquireCookie returns an empty Cookie object from the pool.
-//
-// The returned object may be returned back to the pool with ReleaseCookie.
-// This allows reducing GC load.
-func AcquireCookie() *Cookie {
- return cookiePool.Get().(*Cookie)
-}
-
-// ReleaseCookie returns the Cookie object acquired with AcquireCookie back
-// to the pool.
-//
-// Do not access released Cookie object, otherwise data races may occur.
-func ReleaseCookie(c *Cookie) {
- c.Reset()
- cookiePool.Put(c)
-}
-
-var cookiePool = &sync.Pool{
- New: func() interface{} {
- return &Cookie{}
- },
-}
-
-// Cookie represents HTTP response cookie.
-//
-// Do not copy Cookie objects. Create new object and use CopyTo instead.
-//
-// Cookie instance MUST NOT be used from concurrently running goroutines.
-type Cookie struct {
- noCopy noCopy
-
- key []byte
- value []byte
- expire time.Time
- domain []byte
- path []byte
-
- httpOnly bool
- secure bool
-
- bufKV argsKV
- buf []byte
-}
-
-// CopyTo copies src cookie to c.
-func (c *Cookie) CopyTo(src *Cookie) {
- c.Reset()
- c.key = append(c.key[:0], src.key...)
- c.value = append(c.value[:0], src.value...)
- c.expire = src.expire
- c.domain = append(c.domain[:0], src.domain...)
- c.path = append(c.path[:0], src.path...)
- c.httpOnly = src.httpOnly
- c.secure = src.secure
-}
-
-// HTTPOnly returns true if the cookie is http only.
-func (c *Cookie) HTTPOnly() bool {
- return c.httpOnly
-}
-
-// SetHTTPOnly sets cookie's httpOnly flag to the given value.
-func (c *Cookie) SetHTTPOnly(httpOnly bool) {
- c.httpOnly = httpOnly
-}
-
-// Secure returns true if the cookie is secure.
-func (c *Cookie) Secure() bool {
- return c.secure
-}
-
-// SetSecure sets cookie's secure flag to the given value.
-func (c *Cookie) SetSecure(secure bool) {
- c.secure = secure
-}
-
-// Path returns cookie path.
-func (c *Cookie) Path() []byte {
- return c.path
-}
-
-// SetPath sets cookie path.
-func (c *Cookie) SetPath(path string) {
- c.buf = append(c.buf[:0], path...)
- c.path = normalizePath(c.path, c.buf)
-}
-
-// SetPathBytes sets cookie path.
-func (c *Cookie) SetPathBytes(path []byte) {
- c.buf = append(c.buf[:0], path...)
- c.path = normalizePath(c.path, c.buf)
-}
-
-// Domain returns cookie domain.
-//
-// The returned domain is valid until the next Cookie modification method call.
-func (c *Cookie) Domain() []byte {
- return c.domain
-}
-
-// SetDomain sets cookie domain.
-func (c *Cookie) SetDomain(domain string) {
- c.domain = append(c.domain[:0], domain...)
-}
-
-// SetDomainBytes sets cookie domain.
-func (c *Cookie) SetDomainBytes(domain []byte) {
- c.domain = append(c.domain[:0], domain...)
-}
-
-// Expire returns cookie expiration time.
-//
-// CookieExpireUnlimited is returned if the cookie doesn't expire.
-func (c *Cookie) Expire() time.Time {
- expire := c.expire
- if expire.IsZero() {
- expire = CookieExpireUnlimited
- }
- return expire
-}
-
-// SetExpire sets cookie expiration time.
-//
-// Set expiration time to CookieExpireDelete for expiring (deleting)
-// the cookie on the client.
-//
-// By default cookie lifetime is limited by browser session.
-func (c *Cookie) SetExpire(expire time.Time) {
- c.expire = expire
-}
-
-// Value returns cookie value.
-//
-// The returned value is valid until the next Cookie modification method call.
-func (c *Cookie) Value() []byte {
- return c.value
-}
-
-// SetValue sets cookie value.
-func (c *Cookie) SetValue(value string) {
- c.value = append(c.value[:0], value...)
-}
-
-// SetValueBytes sets cookie value.
-func (c *Cookie) SetValueBytes(value []byte) {
- c.value = append(c.value[:0], value...)
-}
-
-// Key returns cookie name.
-//
-// The returned value is valid until the next Cookie modification method call.
-func (c *Cookie) Key() []byte {
- return c.key
-}
-
-// SetKey sets cookie name.
-func (c *Cookie) SetKey(key string) {
- c.key = append(c.key[:0], key...)
-}
-
-// SetKeyBytes sets cookie name.
-func (c *Cookie) SetKeyBytes(key []byte) {
- c.key = append(c.key[:0], key...)
-}
-
-// Reset clears the cookie.
-func (c *Cookie) Reset() {
- c.key = c.key[:0]
- c.value = c.value[:0]
- c.expire = zeroTime
- c.domain = c.domain[:0]
- c.path = c.path[:0]
- c.httpOnly = false
- c.secure = false
-}
-
-// AppendBytes appends cookie representation to dst and returns
-// the extended dst.
-func (c *Cookie) AppendBytes(dst []byte) []byte {
- if len(c.key) > 0 {
- dst = append(dst, c.key...)
- dst = append(dst, '=')
- }
- dst = append(dst, c.value...)
-
- if !c.expire.IsZero() {
- c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire)
- dst = append(dst, ';', ' ')
- dst = append(dst, strCookieExpires...)
- dst = append(dst, '=')
- dst = append(dst, c.bufKV.value...)
- }
- if len(c.domain) > 0 {
- dst = appendCookiePart(dst, strCookieDomain, c.domain)
- }
- if len(c.path) > 0 {
- dst = appendCookiePart(dst, strCookiePath, c.path)
- }
- if c.httpOnly {
- dst = append(dst, ';', ' ')
- dst = append(dst, strCookieHTTPOnly...)
- }
- if c.secure {
- dst = append(dst, ';', ' ')
- dst = append(dst, strCookieSecure...)
- }
- return dst
-}
-
-// Cookie returns cookie representation.
-//
-// The returned value is valid until the next call to Cookie methods.
-func (c *Cookie) Cookie() []byte {
- c.buf = c.AppendBytes(c.buf[:0])
- return c.buf
-}
-
-// String returns cookie representation.
-func (c *Cookie) String() string {
- return string(c.Cookie())
-}
-
-// WriteTo writes cookie representation to w.
-//
-// WriteTo implements io.WriterTo interface.
-func (c *Cookie) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(c.Cookie())
- return int64(n), err
-}
-
-var errNoCookies = errors.New("no cookies found")
-
-// Parse parses Set-Cookie header.
-func (c *Cookie) Parse(src string) error {
- c.buf = append(c.buf[:0], src...)
- return c.ParseBytes(c.buf)
-}
-
-// ParseBytes parses Set-Cookie header.
-func (c *Cookie) ParseBytes(src []byte) error {
- c.Reset()
-
- var s cookieScanner
- s.b = src
-
- kv := &c.bufKV
- if !s.next(kv) {
- return errNoCookies
- }
-
- c.key = append(c.key[:0], kv.key...)
- c.value = append(c.value[:0], kv.value...)
-
- for s.next(kv) {
- if len(kv.key) == 0 && len(kv.value) == 0 {
- continue
- }
- switch string(kv.key) {
- case "expires":
- v := b2s(kv.value)
- exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
- if err != nil {
- return err
- }
- c.expire = exptime
- case "domain":
- c.domain = append(c.domain[:0], kv.value...)
- case "path":
- c.path = append(c.path[:0], kv.value...)
- case "":
- switch string(kv.value) {
- case "HttpOnly":
- c.httpOnly = true
- case "secure":
- c.secure = true
- }
- }
- }
- return nil
-}
-
-func appendCookiePart(dst, key, value []byte) []byte {
- dst = append(dst, ';', ' ')
- dst = append(dst, key...)
- dst = append(dst, '=')
- return append(dst, value...)
-}
-
-func getCookieKey(dst, src []byte) []byte {
- n := bytes.IndexByte(src, '=')
- if n >= 0 {
- src = src[:n]
- }
- return decodeCookieArg(dst, src, false)
-}
-
-func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte {
- for i, n := 0, len(cookies); i < n; i++ {
- kv := &cookies[i]
- if len(kv.key) > 0 {
- dst = append(dst, kv.key...)
- dst = append(dst, '=')
- }
- dst = append(dst, kv.value...)
- if i+1 < n {
- dst = append(dst, ';', ' ')
- }
- }
- return dst
-}
-
-func parseRequestCookies(cookies []argsKV, src []byte) []argsKV {
- var s cookieScanner
- s.b = src
- var kv *argsKV
- cookies, kv = allocArg(cookies)
- for s.next(kv) {
- if len(kv.key) > 0 || len(kv.value) > 0 {
- cookies, kv = allocArg(cookies)
- }
- }
- return releaseArg(cookies)
-}
-
-type cookieScanner struct {
- b []byte
-}
-
-func (s *cookieScanner) next(kv *argsKV) bool {
- b := s.b
- if len(b) == 0 {
- return false
- }
-
- isKey := true
- k := 0
- for i, c := range b {
- switch c {
- case '=':
- if isKey {
- isKey = false
- kv.key = decodeCookieArg(kv.key, b[:i], false)
- k = i + 1
- }
- case ';':
- if isKey {
- kv.key = kv.key[:0]
- }
- kv.value = decodeCookieArg(kv.value, b[k:i], true)
- s.b = b[i+1:]
- return true
- }
- }
-
- if isKey {
- kv.key = kv.key[:0]
- }
- kv.value = decodeCookieArg(kv.value, b[k:], true)
- s.b = b[len(b):]
- return true
-}
-
-func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte {
- for len(src) > 0 && src[0] == ' ' {
- src = src[1:]
- }
- for len(src) > 0 && src[len(src)-1] == ' ' {
- src = src[:len(src)-1]
- }
- if skipQuotes {
- if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' {
- src = src[1 : len(src)-1]
- }
- }
- return append(dst[:0], src...)
-}
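
A short sketch of the cookie API deleted above: parsing a `Set-Cookie` header value and serializing it back. The header value is a made-up example.

```go
package main

import (
	"fmt"
	"time"

	fasthttp "github.com/VictoriaMetrics/fasthttp"
)

func main() {
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)

	// Parse a Set-Cookie header value (made-up example).
	if err := c.Parse("session=abc123; domain=example.com; path=/; HttpOnly"); err != nil {
		panic(err)
	}
	fmt.Printf("key=%s value=%s domain=%s\n", c.Key(), c.Value(), c.Domain())

	// Serialize it back after setting an expiration a day out.
	c.SetExpire(time.Now().Add(24 * time.Hour))
	fmt.Println(c.String())
}
```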
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/doc.go b/vendor/github.com/VictoriaMetrics/fasthttp/doc.go
deleted file mode 100644
index 8ef161d56..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/doc.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Package fasthttp provides fast HTTP server and client API.
-
-Fasthttp provides the following features:
-
- - Optimized for speed. Easily handles more than 100K qps and more than 1M
- concurrent keep-alive connections on modern hardware.
-
- - Optimized for low memory usage.
-
- - Easy 'Connection: Upgrade' support via RequestCtx.Hijack.
-
- - Server supports requests' pipelining. Multiple requests may be read from
- a single network packet and multiple responses may be sent in a single
- network packet. This may be useful for highly loaded REST services.
-
- - Server provides the following anti-DoS limits:
-
- - The number of concurrent connections.
-
- - The number of concurrent connections per client IP.
-
- - The number of requests per connection.
-
- - Request read timeout.
-
- - Response write timeout.
-
- - Maximum request header size.
-
- - Maximum request body size.
-
- - Maximum request execution time.
-
- - Maximum keep-alive connection lifetime.
-
- - Early filtering out non-GET requests.
-
- - A lot of additional useful info is exposed to request handler:
-
- - Server and client address.
-
- - Per-request logger.
-
- - Unique request id.
-
- - Request start time.
-
- - Connection start time.
-
- - Request sequence number for the current connection.
-
- - Client supports automatic retry on idempotent requests' failure.
-
- - Fasthttp API is designed with the ability to extend existing client
- and server implementations or to write custom client and server
- implementations from scratch.
-*/
-package fasthttp
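
Since the package doc above also covers the server side, here is a minimal handler sketch, assuming the fork retains the usual `ListenAndServe`/`RequestCtx` entry points from server.go (a file outside this hunk):

```go
package main

import (
	fasthttp "github.com/VictoriaMetrics/fasthttp"
)

func main() {
	// Every request is served by a plain function taking *RequestCtx.
	handler := func(ctx *fasthttp.RequestCtx) {
		ctx.SetStatusCode(fasthttp.StatusOK)
		ctx.SetBodyString("hello")
	}
	// ListenAndServe blocks until the listener fails.
	if err := fasthttp.ListenAndServe("127.0.0.1:8080", handler); err != nil {
		panic(err)
	}
}
```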
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/doc.go b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/doc.go
deleted file mode 100644
index 9cf69e710..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package fasthttputil provides utility functions for fasthttp.
-package fasthttputil
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.key b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.key
deleted file mode 100644
index 7e201fc42..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.key
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN EC PRIVATE KEY-----
-MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49
-AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh
-pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg==
------END EC PRIVATE KEY-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.pem b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.pem
deleted file mode 100644
index ca1a7f2e9..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/ecdsa.pem
+++ /dev/null
@@ -1,10 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw
-DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow
-EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA
-mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7
-1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr
-BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
-hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC
-IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG
------END CERTIFICATE-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/inmemory_listener.go
deleted file mode 100644
index d6bcca435..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/inmemory_listener.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package fasthttputil
-
-import (
- "fmt"
- "net"
- "sync"
-)
-
-// InmemoryListener provides in-memory dialer<->net.Listener implementation.
-//
-// It may be used either for fast in-process client<->server communications
-// without network stack overhead or for client<->server tests.
-type InmemoryListener struct {
- lock sync.Mutex
- closed bool
- conns chan net.Conn
-}
-
-// NewInmemoryListener returns new in-memory dialer<->net.Listener.
-func NewInmemoryListener() *InmemoryListener {
- return &InmemoryListener{
- conns: make(chan net.Conn, 1024),
- }
-}
-
-// Accept implements net.Listener's Accept.
-//
-// It is safe calling Accept from concurrently running goroutines.
-//
-// Accept returns a new connection for each Dial call.
-func (ln *InmemoryListener) Accept() (net.Conn, error) {
- c, ok := <-ln.conns
- if !ok {
- return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection")
- }
- return c, nil
-}
-
-// Close implements net.Listener's Close.
-func (ln *InmemoryListener) Close() error {
- var err error
-
- ln.lock.Lock()
- if !ln.closed {
- close(ln.conns)
- ln.closed = true
- } else {
- err = fmt.Errorf("InmemoryListener is already closed")
- }
- ln.lock.Unlock()
- return err
-}
-
-// Addr implements net.Listener's Addr.
-func (ln *InmemoryListener) Addr() net.Addr {
- return &net.UnixAddr{
- Name: "InmemoryListener",
- Net: "memory",
- }
-}
-
-// Dial creates new client<->server connection, enqueues server side
-// of the connection to Accept and returns client side of the connection.
-//
-// It is safe calling Dial from concurrently running goroutines.
-func (ln *InmemoryListener) Dial() (net.Conn, error) {
- pc := NewPipeConns()
- cConn := pc.Conn1()
- sConn := pc.Conn2()
- ln.lock.Lock()
- if !ln.closed {
- ln.conns <- sConn
- } else {
- sConn.Close()
- cConn.Close()
- cConn = nil
- }
- ln.lock.Unlock()
-
- if cConn == nil {
- return nil, fmt.Errorf("InmemoryListener is already closed")
- }
- return cConn, nil
-}
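
A sketch of the in-process round trip `InmemoryListener` enabled: the server end comes from `Accept`, the client end from `Dial`, and no TCP ports are involved. The greeting string is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()
	defer ln.Close()

	// Server side: accept the single pending connection and greet.
	go func() {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		conn.Write([]byte("hello over the in-memory pipe"))
		conn.Close()
	}()

	// Client side: Dial hands back the peer end of the same pipe.
	conn, err := ln.Dial()
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	fmt.Println(string(buf[:n]))
}
```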
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/pipeconns.go b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/pipeconns.go
deleted file mode 100644
index e5a02351c..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/pipeconns.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package fasthttputil
-
-import (
- "errors"
- "io"
- "net"
- "sync"
- "time"
-)
-
-// NewPipeConns returns a new bi-directional connection pipe.
-func NewPipeConns() *PipeConns {
- ch1 := make(chan *byteBuffer, 4)
- ch2 := make(chan *byteBuffer, 4)
-
- pc := &PipeConns{
- stopCh: make(chan struct{}),
- }
- pc.c1.rCh = ch1
- pc.c1.wCh = ch2
- pc.c2.rCh = ch2
- pc.c2.wCh = ch1
- pc.c1.pc = pc
- pc.c2.pc = pc
- return pc
-}
-
-// PipeConns provides bi-directional connection pipe,
-// which uses in-process memory as a transport.
-//
-// PipeConns must be created by calling NewPipeConns.
-//
-// PipeConns has the following additional features compared to connections
-// returned from net.Pipe():
-//
-// * It is faster.
-// * It buffers Write calls, so there is no need to have a concurrent goroutine
-// calling Read in order to unblock each Write call.
-// * It supports read and write deadlines.
-//
-type PipeConns struct {
- c1 pipeConn
- c2 pipeConn
- stopCh chan struct{}
- stopChLock sync.Mutex
-}
-
-// Conn1 returns the first end of bi-directional pipe.
-//
-// Data written to Conn1 may be read from Conn2.
-// Data written to Conn2 may be read from Conn1.
-func (pc *PipeConns) Conn1() net.Conn {
- return &pc.c1
-}
-
-// Conn2 returns the second end of bi-directional pipe.
-//
-// Data written to Conn2 may be read from Conn1.
-// Data written to Conn1 may be read from Conn2.
-func (pc *PipeConns) Conn2() net.Conn {
- return &pc.c2
-}
-
-// Close closes pipe connections.
-func (pc *PipeConns) Close() error {
- pc.stopChLock.Lock()
- select {
- case <-pc.stopCh:
- default:
- close(pc.stopCh)
- }
- pc.stopChLock.Unlock()
-
- return nil
-}
-
-type pipeConn struct {
- b *byteBuffer
- bb []byte
-
- rCh chan *byteBuffer
- wCh chan *byteBuffer
- pc *PipeConns
-
- readDeadlineTimer *time.Timer
- writeDeadlineTimer *time.Timer
-
- readDeadlineCh <-chan time.Time
- writeDeadlineCh <-chan time.Time
-}
-
-func (c *pipeConn) Write(p []byte) (int, error) {
- b := acquireByteBuffer()
- b.b = append(b.b[:0], p...)
-
- select {
- case <-c.pc.stopCh:
- releaseByteBuffer(b)
- return 0, errConnectionClosed
- default:
- }
-
- select {
- case c.wCh <- b:
- default:
- select {
- case c.wCh <- b:
- case <-c.writeDeadlineCh:
- c.writeDeadlineCh = closedDeadlineCh
- return 0, ErrTimeout
- case <-c.pc.stopCh:
- releaseByteBuffer(b)
- return 0, errConnectionClosed
- }
- }
-
- return len(p), nil
-}
-
-func (c *pipeConn) Read(p []byte) (int, error) {
- mayBlock := true
- nn := 0
- for len(p) > 0 {
- n, err := c.read(p, mayBlock)
- nn += n
- if err != nil {
- if !mayBlock && err == errWouldBlock {
- err = nil
- }
- return nn, err
- }
- p = p[n:]
- mayBlock = false
- }
-
- return nn, nil
-}
-
-func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) {
- if len(c.bb) == 0 {
- if err := c.readNextByteBuffer(mayBlock); err != nil {
- return 0, err
- }
- }
- n := copy(p, c.bb)
- c.bb = c.bb[n:]
-
- return n, nil
-}
-
-func (c *pipeConn) readNextByteBuffer(mayBlock bool) error {
- releaseByteBuffer(c.b)
- c.b = nil
-
- select {
- case c.b = <-c.rCh:
- default:
- if !mayBlock {
- return errWouldBlock
- }
- select {
- case c.b = <-c.rCh:
- case <-c.readDeadlineCh:
- c.readDeadlineCh = closedDeadlineCh
- // rCh may contain data when deadline is reached.
- // Read the data before returning ErrTimeout.
- select {
- case c.b = <-c.rCh:
- default:
- return ErrTimeout
- }
- case <-c.pc.stopCh:
- // rCh may contain data when stopCh is closed.
- // Read the data before returning EOF.
- select {
- case c.b = <-c.rCh:
- default:
- return io.EOF
- }
- }
- }
-
- c.bb = c.b.b
- return nil
-}
-
-var (
- errWouldBlock = errors.New("would block")
- errConnectionClosed = errors.New("connection closed")
-
- // ErrTimeout is returned from Read() or Write() on timeout.
- ErrTimeout = errors.New("timeout")
-)
-
-func (c *pipeConn) Close() error {
- return c.pc.Close()
-}
-
-func (c *pipeConn) LocalAddr() net.Addr {
- return pipeAddr(0)
-}
-
-func (c *pipeConn) RemoteAddr() net.Addr {
- return pipeAddr(0)
-}
-
-func (c *pipeConn) SetDeadline(deadline time.Time) error {
- c.SetReadDeadline(deadline)
- c.SetWriteDeadline(deadline)
- return nil
-}
-
-func (c *pipeConn) SetReadDeadline(deadline time.Time) error {
- if c.readDeadlineTimer == nil {
- c.readDeadlineTimer = time.NewTimer(time.Hour)
- }
- c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline)
- return nil
-}
-
-func (c *pipeConn) SetWriteDeadline(deadline time.Time) error {
- if c.writeDeadlineTimer == nil {
- c.writeDeadlineTimer = time.NewTimer(time.Hour)
- }
- c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline)
- return nil
-}
-
-func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time {
- if !t.Stop() {
- select {
- case <-t.C:
- default:
- }
- }
- if deadline.IsZero() {
- return nil
- }
- d := -time.Since(deadline)
- if d <= 0 {
- return closedDeadlineCh
- }
- t.Reset(d)
- return t.C
-}
-
-var closedDeadlineCh = func() <-chan time.Time {
- ch := make(chan time.Time)
- close(ch)
- return ch
-}()
-
-type pipeAddr int
-
-func (pipeAddr) Network() string {
- return "pipe"
-}
-
-func (pipeAddr) String() string {
- return "pipe"
-}
-
-type byteBuffer struct {
- b []byte
-}
-
-func acquireByteBuffer() *byteBuffer {
- return byteBufferPool.Get().(*byteBuffer)
-}
-
-func releaseByteBuffer(b *byteBuffer) {
- if b != nil {
- byteBufferPool.Put(b)
- }
-}
-
-var byteBufferPool = &sync.Pool{
- New: func() interface{} {
- return &byteBuffer{
- b: make([]byte, 1024),
- }
- },
-}
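
And a sketch of `PipeConns` itself, showing the two properties its doc comment highlights: buffered writes (no concurrent reader needed) and deadline support via `ErrTimeout`.

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/fasthttp/fasthttputil"
)

func main() {
	pc := fasthttputil.NewPipeConns()
	defer pc.Close()
	c1, c2 := pc.Conn1(), pc.Conn2()

	// Writes are buffered in the pipe, so no concurrent reader is needed.
	if _, err := c1.Write([]byte("ping")); err != nil {
		panic(err)
	}

	buf := make([]byte, 4)
	if _, err := c2.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ping

	// A read past the deadline fails with fasthttputil.ErrTimeout.
	c2.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
	_, err := c2.Read(buf)
	fmt.Println(err == fasthttputil.ErrTimeout) // true
}
```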
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.key b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.key
deleted file mode 100644
index 00a79a3b5..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG
-3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U
-wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0
-FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf
-IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg
-GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF
-sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2
-sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D
-uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb
-K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3
-YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+
-DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk
-B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV
-Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x
-IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY
-wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj
-wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D
-FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m
-tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX
-fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU
-ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk
-K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT
-6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt
-9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN
-Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV
-c257YgaWmjK9uB0Y2r2VxS0G
------END PRIVATE KEY-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.pem b/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.pem
deleted file mode 100644
index 93e77cd95..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fasthttputil/rsa.pem
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV
-BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV
-MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D
-K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te
-+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij
-L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1
-xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY
-6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98
-L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2
-45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li
-K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6
-X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI
-whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd
------END CERTIFICATE-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/fs.go b/vendor/github.com/VictoriaMetrics/fasthttp/fs.go
deleted file mode 100644
index 72629fb25..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/fs.go
+++ /dev/null
@@ -1,1251 +0,0 @@
-package fasthttp
-
-import (
- "bytes"
- "errors"
- "fmt"
- "html"
- "io"
- "io/ioutil"
- "mime"
- "net/http"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/klauspost/compress/gzip"
-)
-
-// ServeFileBytesUncompressed returns HTTP response containing file contents
-// from the given path.
-//
-// Directory contents are returned if path points to a directory.
-//
-// ServeFileBytes may be used for saving network traffic when serving files
-// with good compression ratio.
-//
-// See also RequestCtx.SendFileBytes.
-func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) {
- ServeFileUncompressed(ctx, b2s(path))
-}
-
-// ServeFileUncompressed returns HTTP response containing file contents
-// from the given path.
-//
-// Directory contents are returned if path points to a directory.
-//
-// ServeFile may be used for saving network traffic when serving files
-// with good compression ratio.
-//
-// See also RequestCtx.SendFile.
-func ServeFileUncompressed(ctx *RequestCtx, path string) {
- ctx.Request.Header.DelBytes(strAcceptEncoding)
- ServeFile(ctx, path)
-}
-
-// ServeFileBytes returns HTTP response containing compressed file contents
-// from the given path.
-//
-// HTTP response may contain uncompressed file contents in the following cases:
-//
-// - Missing 'Accept-Encoding: gzip' request header.
-// - No write access to directory containing the file.
-//
-// Directory contents are returned if path points to a directory.
-//
-// Use ServeFileBytesUncompressed if you don't need to serve compressed
-// file contents.
-//
-// See also RequestCtx.SendFileBytes.
-func ServeFileBytes(ctx *RequestCtx, path []byte) {
- ServeFile(ctx, b2s(path))
-}
-
-// ServeFile returns HTTP response containing compressed file contents
-// from the given path.
-//
-// HTTP response may contain uncompressed file contents in the following cases:
-//
-// - Missing 'Accept-Encoding: gzip' request header.
-// - No write access to directory containing the file.
-//
-// Directory contents are returned if path points to a directory.
-//
-// Use ServeFileUncompressed if you don't need to serve compressed file contents.
-//
-// See also RequestCtx.SendFile.
-func ServeFile(ctx *RequestCtx, path string) {
- rootFSOnce.Do(func() {
- rootFSHandler = rootFS.NewRequestHandler()
- })
- if len(path) == 0 || path[0] != '/' {
- // extend relative path to absolute path
- var err error
- if path, err = filepath.Abs(path); err != nil {
- ctx.Logger().Printf("cannot resolve path %q to absolute file path: %s", path, err)
- ctx.Error("Internal Server Error", StatusInternalServerError)
- return
- }
- }
- ctx.Request.SetRequestURI(path)
- rootFSHandler(ctx)
-}
-
-var (
- rootFSOnce sync.Once
- rootFS = &FS{
- Root: "/",
- GenerateIndexPages: true,
- Compress: true,
- AcceptByteRange: true,
- }
- rootFSHandler RequestHandler
-)
-
-// PathRewriteFunc must return new request path based on arbitrary ctx
-// info such as ctx.Path().
-//
-// Path rewriter is used in FS for translating the current request
-// to the local filesystem path relative to FS.Root.
-//
-// The returned path must not contain '/../' substrings for security reasons,
-// since such paths may refer to files outside FS.Root.
-//
-// The returned path may refer to ctx members. For example, ctx.Path().
-type PathRewriteFunc func(ctx *RequestCtx) []byte
-
-// NewVHostPathRewriter returns path rewriter, which strips slashesCount
-// leading slashes from the path and prepends the path with request's host,
-// thus simplifying virtual hosting for static files.
-//
-// Examples:
-//
-// - host=foobar.com, slashesCount=0, original path="/foo/bar".
-// Resulting path: "/foobar.com/foo/bar"
-//
-// - host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg"
-// Resulting path: "/img.aaa.com/123/456.jpg"
-func NewVHostPathRewriter(slashesCount int) PathRewriteFunc {
- return func(ctx *RequestCtx) []byte {
- path := stripLeadingSlashes(ctx.Path(), slashesCount)
- host := ctx.Host()
- if n := bytes.IndexByte(host, '/'); n >= 0 {
- host = nil
- }
- if len(host) == 0 {
- host = strInvalidHost
- }
- b := AcquireByteBuffer()
- b.B = append(b.B, '/')
- b.B = append(b.B, host...)
- b.B = append(b.B, path...)
- ctx.URI().SetPathBytes(b.B)
- ReleaseByteBuffer(b)
-
- return ctx.Path()
- }
-}
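For reference, the removed rewriter was typically wired into an FS value along these lines. This is a sketch written for this change, not code from the deleted file; the root directory is illustrative:

fs := &fasthttp.FS{
	// Files for host "foobar.com" are looked up under /var/www/foobar.com,
	// files for "img.aaa.com" under /var/www/img.aaa.com, and so on.
	Root:        "/var/www",
	PathRewrite: fasthttp.NewVHostPathRewriter(0),
}
vhostHandler := fs.NewRequestHandler()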
-
-var strInvalidHost = []byte("invalid-host")
-
-// NewPathSlashesStripper returns path rewriter, which strips slashesCount
-// leading slashes from the path.
-//
-// Examples:
-//
-// - slashesCount = 0, original path: "/foo/bar", result: "/foo/bar"
-// - slashesCount = 1, original path: "/foo/bar", result: "/bar"
-// - slashesCount = 2, original path: "/foo/bar", result: ""
-//
-// The returned path rewriter may be used as FS.PathRewrite.
-func NewPathSlashesStripper(slashesCount int) PathRewriteFunc {
- return func(ctx *RequestCtx) []byte {
- return stripLeadingSlashes(ctx.Path(), slashesCount)
- }
-}
-
-// NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes
-// from the path prefix.
-//
-// Examples:
-//
-// - prefixSize = 0, original path: "/foo/bar", result: "/foo/bar"
-// - prefixSize = 3, original path: "/foo/bar", result: "o/bar"
-// - prefixSize = 7, original path: "/foo/bar", result: "r"
-//
-// The returned path rewriter may be used as FS.PathRewrite.
-func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
- return func(ctx *RequestCtx) []byte {
- path := ctx.Path()
- if len(path) >= prefixSize {
- path = path[prefixSize:]
- }
- return path
- }
-}
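Both strippers plug into FS.PathRewrite the same way. A hedged sketch (the directory and prefix are made up): stripping one leading path segment makes GET /static/css/app.css resolve to /var/www/css/app.css.

fs := &fasthttp.FS{
	Root:        "/var/www",
	PathRewrite: fasthttp.NewPathSlashesStripper(1),
	// Equivalent byte-based variant for a known prefix:
	// PathRewrite: fasthttp.NewPathPrefixStripper(len("/static")),
}
staticHandler := fs.NewRequestHandler()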
-
-// FS represents settings for request handler serving static files
-// from the local filesystem.
-//
-// Copying FS values is prohibited. Create new values instead.
-type FS struct {
- noCopy noCopy
-
- // Path to the root directory to serve files from.
- Root string
-
- // List of index file names to try opening during directory access.
- //
- // For example:
- //
- // * index.html
- // * index.htm
- // * my-super-index.xml
- //
- // By default the list is empty.
- IndexNames []string
-
- // Index pages for directories without files matching IndexNames
- // are automatically generated if set.
- //
-	// Directory index generation may be quite slow for directories
-	// with many files (more than 1K), so enabling index page
-	// generation for such directories is discouraged.
- //
- // By default index pages aren't generated.
- GenerateIndexPages bool
-
- // Transparently compresses responses if set to true.
- //
- // The server tries minimizing CPU usage by caching compressed files.
- // It adds CompressedFileSuffix suffix to the original file name and
- // tries saving the resulting compressed file under the new file name.
-	// So it is advisable to give the server write access to Root
-	// and to all inner folders in order to minimize CPU usage when serving
-	// compressed responses.
- //
- // Transparent compression is disabled by default.
- Compress bool
-
- // Enables byte range requests if set to true.
- //
- // Byte range requests are disabled by default.
- AcceptByteRange bool
-
- // Path rewriting function.
- //
- // By default request path is not modified.
- PathRewrite PathRewriteFunc
-
- // Expiration duration for inactive file handlers.
- //
- // FSHandlerCacheDuration is used by default.
- CacheDuration time.Duration
-
- // Suffix to add to the name of cached compressed file.
- //
-	// This value takes effect only if Compress is set.
- //
- // FSCompressedFileSuffix is used by default.
- CompressedFileSuffix string
-
- once sync.Once
- h RequestHandler
-}
-
-// FSCompressedFileSuffix is the suffix FS adds to the original file names
-// when trying to store compressed file under the new file name.
-// See FS.Compress for details.
-const FSCompressedFileSuffix = ".fasthttp.gz"
-
-// FSHandlerCacheDuration is the default expiration duration for inactive
-// file handlers opened by FS.
-const FSHandlerCacheDuration = 10 * time.Second
-
-// FSHandler returns request handler serving static files from
-// the given root folder.
-//
-// stripSlashes indicates how many leading slashes must be stripped
-// from requested path before searching requested file in the root folder.
-// Examples:
-//
-// - stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar"
-// - stripSlashes = 1, original path: "/foo/bar", result: "/bar"
-// - stripSlashes = 2, original path: "/foo/bar", result: ""
-//
-// The returned request handler automatically generates index pages
-// for directories without index.html.
-//
-// The returned handler caches requested file handles
-// for FSHandlerCacheDuration.
-// Make sure your program has enough 'max open files' limit aka
-// 'ulimit -n' if root folder contains many files.
-//
-// Do not create multiple request handler instances for the same
-// (root, stripSlashes) arguments - just reuse a single instance.
-// Otherwise goroutine leak will occur.
-func FSHandler(root string, stripSlashes int) RequestHandler {
- fs := &FS{
- Root: root,
- IndexNames: []string{"index.html"},
- GenerateIndexPages: true,
- AcceptByteRange: true,
- }
- if stripSlashes > 0 {
- fs.PathRewrite = NewPathSlashesStripper(stripSlashes)
- }
- return fs.NewRequestHandler()
-}
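A complete toy program using the removed helper might have looked as follows, assuming the fork retains upstream fasthttp's ListenAndServe (the port and root directory are illustrative):

package main

import (
	"log"

	"github.com/VictoriaMetrics/fasthttp"
)

func main() {
	// Serve files from /var/www, stripping one leading path segment,
	// so GET /static/app.js maps to /var/www/app.js.
	handler := fasthttp.FSHandler("/var/www", 1)
	if err := fasthttp.ListenAndServe(":8080", handler); err != nil {
		log.Fatalf("error serving static files: %s", err)
	}
}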
-
-// NewRequestHandler returns new request handler with the given FS settings.
-//
-// The returned handler caches requested file handles
-// for FS.CacheDuration.
-// Make sure your program has enough 'max open files' limit aka
-// 'ulimit -n' if FS.Root folder contains many files.
-//
-// Do not create multiple request handlers from a single FS instance -
-// just reuse a single request handler.
-func (fs *FS) NewRequestHandler() RequestHandler {
- fs.once.Do(fs.initRequestHandler)
- return fs.h
-}
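When the FSHandler defaults were not enough, the FS struct was configured explicitly. A sketch with made-up values, using only fields defined in this file (assumes "time" is imported):

fs := &fasthttp.FS{
	Root:               "/var/www",
	IndexNames:         []string{"index.html", "index.htm"},
	GenerateIndexPages: true,
	Compress:           true, // needs write access to Root for *.fasthttp.gz files
	AcceptByteRange:    true,
	CacheDuration:      30 * time.Second, // default is FSHandlerCacheDuration (10s)
}
handler := fs.NewRequestHandler()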
-
-func (fs *FS) initRequestHandler() {
- root := fs.Root
-
- // serve files from the current working directory if root is empty
- if len(root) == 0 {
- root = "."
- }
-
- // strip trailing slashes from the root path
- for len(root) > 0 && root[len(root)-1] == '/' {
- root = root[:len(root)-1]
- }
-
- cacheDuration := fs.CacheDuration
- if cacheDuration <= 0 {
- cacheDuration = FSHandlerCacheDuration
- }
- compressedFileSuffix := fs.CompressedFileSuffix
- if len(compressedFileSuffix) == 0 {
- compressedFileSuffix = FSCompressedFileSuffix
- }
-
- h := &fsHandler{
- root: root,
- indexNames: fs.IndexNames,
- pathRewrite: fs.PathRewrite,
- generateIndexPages: fs.GenerateIndexPages,
- compress: fs.Compress,
- acceptByteRange: fs.AcceptByteRange,
- cacheDuration: cacheDuration,
- compressedFileSuffix: compressedFileSuffix,
- cache: make(map[string]*fsFile),
- compressedCache: make(map[string]*fsFile),
- }
-
- go func() {
- var pendingFiles []*fsFile
- for {
- time.Sleep(cacheDuration / 2)
- pendingFiles = h.cleanCache(pendingFiles)
- }
- }()
-
- fs.h = h.handleRequest
-}
-
-type fsHandler struct {
- root string
- indexNames []string
- pathRewrite PathRewriteFunc
- generateIndexPages bool
- compress bool
- acceptByteRange bool
- cacheDuration time.Duration
- compressedFileSuffix string
-
- cache map[string]*fsFile
- compressedCache map[string]*fsFile
- cacheLock sync.Mutex
-
- smallFileReaderPool sync.Pool
-}
-
-type fsFile struct {
- h *fsHandler
- f *os.File
- dirIndex []byte
- contentType string
- contentLength int
- compressed bool
-
- lastModified time.Time
- lastModifiedStr []byte
-
- t time.Time
- readersCount int
-
- bigFiles []*bigFileReader
- bigFilesLock sync.Mutex
-}
-
-func (ff *fsFile) NewReader() (io.Reader, error) {
- if ff.isBig() {
- r, err := ff.bigFileReader()
- if err != nil {
- ff.decReadersCount()
- }
- return r, err
- }
- return ff.smallFileReader(), nil
-}
-
-func (ff *fsFile) smallFileReader() io.Reader {
- v := ff.h.smallFileReaderPool.Get()
- if v == nil {
- v = &fsSmallFileReader{}
- }
- r := v.(*fsSmallFileReader)
- r.ff = ff
- r.endPos = ff.contentLength
- if r.startPos > 0 {
- panic("BUG: fsSmallFileReader with non-nil startPos found in the pool")
- }
- return r
-}
-
-// files bigger than this size are sent with sendfile
-const maxSmallFileSize = 2 * 4096
-
-func (ff *fsFile) isBig() bool {
- return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0
-}
-
-func (ff *fsFile) bigFileReader() (io.Reader, error) {
- if ff.f == nil {
- panic("BUG: ff.f must be non-nil in bigFileReader")
- }
-
- var r io.Reader
-
- ff.bigFilesLock.Lock()
- n := len(ff.bigFiles)
- if n > 0 {
- r = ff.bigFiles[n-1]
- ff.bigFiles = ff.bigFiles[:n-1]
- }
- ff.bigFilesLock.Unlock()
-
- if r != nil {
- return r, nil
- }
-
- f, err := os.Open(ff.f.Name())
- if err != nil {
- return nil, fmt.Errorf("cannot open already opened file: %s", err)
- }
- return &bigFileReader{
- f: f,
- ff: ff,
- r: f,
- }, nil
-}
-
-func (ff *fsFile) Release() {
- if ff.f != nil {
- ff.f.Close()
-
- if ff.isBig() {
- ff.bigFilesLock.Lock()
- for _, r := range ff.bigFiles {
- r.f.Close()
- }
- ff.bigFilesLock.Unlock()
- }
- }
-}
-
-func (ff *fsFile) decReadersCount() {
- ff.h.cacheLock.Lock()
- ff.readersCount--
- if ff.readersCount < 0 {
- panic("BUG: negative fsFile.readersCount!")
- }
- ff.h.cacheLock.Unlock()
-}
-
-// bigFileReader attempts to trigger sendfile
-// for sending big files over the wire.
-type bigFileReader struct {
- f *os.File
- ff *fsFile
- r io.Reader
- lr io.LimitedReader
-}
-
-func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error {
- if _, err := r.f.Seek(int64(startPos), 0); err != nil {
- return err
- }
- r.r = &r.lr
- r.lr.R = r.f
- r.lr.N = int64(endPos - startPos + 1)
- return nil
-}
-
-func (r *bigFileReader) Read(p []byte) (int, error) {
- return r.r.Read(p)
-}
-
-func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) {
- if rf, ok := w.(io.ReaderFrom); ok {
-		// fast path. Sendfile must be triggered
- return rf.ReadFrom(r.r)
- }
-
- // slow path
- return copyZeroAlloc(w, r.r)
-}
-
-func (r *bigFileReader) Close() error {
- r.r = r.f
- n, err := r.f.Seek(0, 0)
- if err == nil {
- if n != 0 {
- panic("BUG: File.Seek(0,0) returned (non-zero, nil)")
- }
-
- ff := r.ff
- ff.bigFilesLock.Lock()
- ff.bigFiles = append(ff.bigFiles, r)
- ff.bigFilesLock.Unlock()
- } else {
- r.f.Close()
- }
- r.ff.decReadersCount()
- return err
-}
-
-type fsSmallFileReader struct {
- ff *fsFile
- startPos int
- endPos int
-}
-
-func (r *fsSmallFileReader) Close() error {
- ff := r.ff
- ff.decReadersCount()
- r.ff = nil
- r.startPos = 0
- r.endPos = 0
- ff.h.smallFileReaderPool.Put(r)
- return nil
-}
-
-func (r *fsSmallFileReader) UpdateByteRange(startPos, endPos int) error {
- r.startPos = startPos
- r.endPos = endPos + 1
- return nil
-}
-
-func (r *fsSmallFileReader) Read(p []byte) (int, error) {
- tailLen := r.endPos - r.startPos
- if tailLen <= 0 {
- return 0, io.EOF
- }
- if len(p) > tailLen {
- p = p[:tailLen]
- }
-
- ff := r.ff
- if ff.f != nil {
- n, err := ff.f.ReadAt(p, int64(r.startPos))
- r.startPos += n
- return n, err
- }
-
- n := copy(p, ff.dirIndex[r.startPos:])
- r.startPos += n
- return n, nil
-}
-
-func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) {
- ff := r.ff
-
- var n int
- var err error
- if ff.f == nil {
- n, err = w.Write(ff.dirIndex[r.startPos:r.endPos])
- return int64(n), err
- }
-
- if rf, ok := w.(io.ReaderFrom); ok {
- return rf.ReadFrom(r)
- }
-
- curPos := r.startPos
- bufv := copyBufPool.Get().(*copyBuf)
- buf := bufv.b[:]
- for err == nil {
- tailLen := r.endPos - curPos
- if tailLen <= 0 {
- break
- }
- if len(buf) > tailLen {
- buf = buf[:tailLen]
- }
- n, err = ff.f.ReadAt(buf, int64(curPos))
- nw, errw := w.Write(buf[:n])
- curPos += nw
- if errw == nil && nw != n {
- panic("BUG: Write(p) returned (n, nil), where n != len(p)")
- }
- if err == nil {
- err = errw
- }
- }
- copyBufPool.Put(bufv)
-
- if err == io.EOF {
- err = nil
- }
- return int64(curPos - r.startPos), err
-}
-
-func (h *fsHandler) cleanCache(pendingFiles []*fsFile) []*fsFile {
- var filesToRelease []*fsFile
-
- h.cacheLock.Lock()
-
- // Close files which couldn't be closed before due to non-zero
- // readers count on the previous run.
- var remainingFiles []*fsFile
- for _, ff := range pendingFiles {
- if ff.readersCount > 0 {
- remainingFiles = append(remainingFiles, ff)
- } else {
- filesToRelease = append(filesToRelease, ff)
- }
- }
- pendingFiles = remainingFiles
-
- pendingFiles, filesToRelease = cleanCacheNolock(h.cache, pendingFiles, filesToRelease, h.cacheDuration)
- pendingFiles, filesToRelease = cleanCacheNolock(h.compressedCache, pendingFiles, filesToRelease, h.cacheDuration)
-
- h.cacheLock.Unlock()
-
- for _, ff := range filesToRelease {
- ff.Release()
- }
-
- return pendingFiles
-}
-
-func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) {
- t := time.Now()
- for k, ff := range cache {
- if t.Sub(ff.t) > cacheDuration {
- if ff.readersCount > 0 {
- // There are pending readers on stale file handle,
- // so we cannot close it. Put it into pendingFiles
- // so it will be closed later.
- pendingFiles = append(pendingFiles, ff)
- } else {
- filesToRelease = append(filesToRelease, ff)
- }
- delete(cache, k)
- }
- }
- return pendingFiles, filesToRelease
-}
-
-func (h *fsHandler) handleRequest(ctx *RequestCtx) {
- var path []byte
- if h.pathRewrite != nil {
- path = h.pathRewrite(ctx)
- } else {
- path = ctx.Path()
- }
- path = stripTrailingSlashes(path)
-
- if n := bytes.IndexByte(path, 0); n >= 0 {
- ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path)
- ctx.Error("Are you a hacker?", StatusBadRequest)
- return
- }
- if h.pathRewrite != nil {
- // There is no need to check for '/../' if path = ctx.Path(),
- // since ctx.Path must normalize and sanitize the path.
-
- if n := bytes.Index(path, strSlashDotDotSlash); n >= 0 {
- ctx.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", n, path)
- ctx.Error("Internal Server Error", StatusInternalServerError)
- return
- }
- }
-
- mustCompress := false
- fileCache := h.cache
- byteRange := ctx.Request.Header.peek(strRange)
- if len(byteRange) == 0 && h.compress && ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
- mustCompress = true
- fileCache = h.compressedCache
- }
-
- h.cacheLock.Lock()
- ff, ok := fileCache[string(path)]
- if ok {
- ff.readersCount++
- }
- h.cacheLock.Unlock()
-
- if !ok {
- pathStr := string(path)
- filePath := h.root + pathStr
- var err error
- ff, err = h.openFSFile(filePath, mustCompress)
- if mustCompress && err == errNoCreatePermission {
- ctx.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+
- "Allow write access to the directory with this file in order to improve fasthttp performance", filePath)
- mustCompress = false
- ff, err = h.openFSFile(filePath, mustCompress)
- }
- if err == errDirIndexRequired {
- ff, err = h.openIndexFile(ctx, filePath, mustCompress)
- if err != nil {
- ctx.Logger().Printf("cannot open dir index %q: %s", filePath, err)
- ctx.Error("Directory index is forbidden", StatusForbidden)
- return
- }
- } else if err != nil {
- ctx.Logger().Printf("cannot open file %q: %s", filePath, err)
- ctx.Error("Cannot open requested path", StatusNotFound)
- return
- }
-
- h.cacheLock.Lock()
- ff1, ok := fileCache[pathStr]
- if !ok {
- fileCache[pathStr] = ff
- ff.readersCount++
- } else {
- ff1.readersCount++
- }
- h.cacheLock.Unlock()
-
- if ok {
-			// The file has already been opened by another
- // goroutine, so close the current file and use
- // the file opened by another goroutine instead.
- ff.Release()
- ff = ff1
- }
- }
-
- if !ctx.IfModifiedSince(ff.lastModified) {
- ff.decReadersCount()
- ctx.NotModified()
- return
- }
-
- r, err := ff.NewReader()
- if err != nil {
- ctx.Logger().Printf("cannot obtain file reader for path=%q: %s", path, err)
- ctx.Error("Internal Server Error", StatusInternalServerError)
- return
- }
-
- hdr := &ctx.Response.Header
- if ff.compressed {
- hdr.SetCanonical(strContentEncoding, strGzip)
- }
-
- statusCode := StatusOK
- contentLength := ff.contentLength
- if h.acceptByteRange {
- hdr.SetCanonical(strAcceptRanges, strBytes)
- if len(byteRange) > 0 {
- startPos, endPos, err := ParseByteRange(byteRange, contentLength)
- if err != nil {
- r.(io.Closer).Close()
- ctx.Logger().Printf("cannot parse byte range %q for path=%q: %s", byteRange, path, err)
- ctx.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable)
- return
- }
-
- if err = r.(byteRangeUpdater).UpdateByteRange(startPos, endPos); err != nil {
- r.(io.Closer).Close()
- ctx.Logger().Printf("cannot seek byte range %q for path=%q: %s", byteRange, path, err)
- ctx.Error("Internal Server Error", StatusInternalServerError)
- return
- }
-
- hdr.SetContentRange(startPos, endPos, contentLength)
- contentLength = endPos - startPos + 1
- statusCode = StatusPartialContent
- }
- }
-
- hdr.SetCanonical(strLastModified, ff.lastModifiedStr)
- if !ctx.IsHead() {
- ctx.SetBodyStream(r, contentLength)
- } else {
- ctx.Response.ResetBody()
- ctx.Response.SkipBody = true
- ctx.Response.Header.SetContentLength(contentLength)
- if rc, ok := r.(io.Closer); ok {
- if err := rc.Close(); err != nil {
- ctx.Logger().Printf("cannot close file reader: %s", err)
- ctx.Error("Internal Server Error", StatusInternalServerError)
- return
- }
- }
- }
- ctx.SetContentType(ff.contentType)
- ctx.SetStatusCode(statusCode)
-}
-
-type byteRangeUpdater interface {
- UpdateByteRange(startPos, endPos int) error
-}
-
-// ParseByteRange parses 'Range: bytes=...' header value.
-//
-// It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 .
-func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) {
- b := byteRange
- if !bytes.HasPrefix(b, strBytes) {
- return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes)
- }
-
- b = b[len(strBytes):]
- if len(b) == 0 || b[0] != '=' {
- return 0, 0, fmt.Errorf("missing byte range in %q", byteRange)
- }
- b = b[1:]
-
- n := bytes.IndexByte(b, '-')
- if n < 0 {
- return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange)
- }
-
- if n == 0 {
- v, err := ParseUint(b[n+1:])
- if err != nil {
- return 0, 0, err
- }
- startPos := contentLength - v
- if startPos < 0 {
- startPos = 0
- }
- return startPos, contentLength - 1, nil
- }
-
- if startPos, err = ParseUint(b[:n]); err != nil {
- return 0, 0, err
- }
- if startPos >= contentLength {
- return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange)
- }
-
- b = b[n+1:]
- if len(b) == 0 {
- return startPos, contentLength - 1, nil
- }
-
- if endPos, err = ParseUint(b); err != nil {
- return 0, 0, err
- }
- if endPos >= contentLength {
- endPos = contentLength - 1
- }
- if endPos < startPos {
- return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. byte range %q", byteRange)
- }
- return startPos, endPos, nil
-}
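ParseByteRange is a pure function, so its behavior is easy to pin down with examples (the values follow from the parsing rules above):

// An explicit range: the first 500 bytes of a 10000-byte file.
start, end, err := fasthttp.ParseByteRange([]byte("bytes=0-499"), 10000)
// start == 0, end == 499, err == nil

// A suffix range: the last 500 bytes of the same file.
start, end, err = fasthttp.ParseByteRange([]byte("bytes=-500"), 10000)
// start == 9500, end == 9999, err == nil

// An open-ended range: everything from offset 9500 onwards.
start, end, err = fasthttp.ParseByteRange([]byte("bytes=9500-"), 10000)
// start == 9500, end == 9999, err == nil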
-
-func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool) (*fsFile, error) {
- for _, indexName := range h.indexNames {
- indexFilePath := dirPath + "/" + indexName
- ff, err := h.openFSFile(indexFilePath, mustCompress)
- if err == nil {
- return ff, nil
- }
- if !os.IsNotExist(err) {
- return nil, fmt.Errorf("cannot open file %q: %s", indexFilePath, err)
- }
- }
-
- if !h.generateIndexPages {
- return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
- }
-
- return h.createDirIndex(ctx.URI(), dirPath, mustCompress)
-}
-
-var (
- errDirIndexRequired = errors.New("directory index required")
- errNoCreatePermission = errors.New("no 'create file' permissions")
-)
-
-func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) (*fsFile, error) {
- w := &ByteBuffer{}
-
- basePathEscaped := html.EscapeString(string(base.Path()))
-	fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
-	fmt.Fprintf(w, "<h1>%s</h1>", basePathEscaped)
-	fmt.Fprintf(w, "<ul>")
-
- if len(basePathEscaped) > 1 {
- var parentURI URI
- base.CopyTo(&parentURI)
- parentURI.Update(string(base.Path()) + "/..")
- parentPathEscaped := html.EscapeString(string(parentURI.Path()))
-		fmt.Fprintf(w, `<li><a href="%s" class="dir">..</a></li>`, parentPathEscaped)
- }
-
- f, err := os.Open(dirPath)
- if err != nil {
- return nil, err
- }
-
- fileinfos, err := f.Readdir(0)
- f.Close()
- if err != nil {
- return nil, err
- }
-
- fm := make(map[string]os.FileInfo, len(fileinfos))
- var filenames []string
- for _, fi := range fileinfos {
- name := fi.Name()
- if strings.HasSuffix(name, h.compressedFileSuffix) {
- // Do not show compressed files on index page.
- continue
- }
- fm[name] = fi
- filenames = append(filenames, name)
- }
-
- var u URI
- base.CopyTo(&u)
- u.Update(string(u.Path()) + "/")
-
- sort.Sort(sort.StringSlice(filenames))
- for _, name := range filenames {
- u.Update(name)
- pathEscaped := html.EscapeString(string(u.Path()))
- fi := fm[name]
- auxStr := "dir"
- className := "dir"
- if !fi.IsDir() {
- auxStr = fmt.Sprintf("file, %d bytes", fi.Size())
- className = "file"
- }
-		fmt.Fprintf(w, `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>`,
- pathEscaped, className, html.EscapeString(name), auxStr, fsModTime(fi.ModTime()))
- }
-
-	fmt.Fprintf(w, "</ul></body></html>")
-
- if mustCompress {
- var zbuf ByteBuffer
- zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
- w = &zbuf
- }
-
- dirIndex := w.B
- lastModified := time.Now()
- ff := &fsFile{
- h: h,
- dirIndex: dirIndex,
- contentType: "text/html; charset=utf-8",
- contentLength: len(dirIndex),
- compressed: mustCompress,
- lastModified: lastModified,
- lastModifiedStr: AppendHTTPDate(nil, lastModified),
-
- t: lastModified,
- }
- return ff, nil
-}
-
-const (
- fsMinCompressRatio = 0.8
- fsMaxCompressibleFileSize = 8 * 1024 * 1024
-)
-
-func (h *fsHandler) compressAndOpenFSFile(filePath string) (*fsFile, error) {
- f, err := os.Open(filePath)
- if err != nil {
- return nil, err
- }
-
- fileInfo, err := f.Stat()
- if err != nil {
- f.Close()
- return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err)
- }
-
- if fileInfo.IsDir() {
- f.Close()
- return nil, errDirIndexRequired
- }
-
- if strings.HasSuffix(filePath, h.compressedFileSuffix) ||
- fileInfo.Size() > fsMaxCompressibleFileSize ||
- !isFileCompressible(f, fsMinCompressRatio) {
- return h.newFSFile(f, fileInfo, false)
- }
-
- compressedFilePath := filePath + h.compressedFileSuffix
- absPath, err := filepath.Abs(compressedFilePath)
- if err != nil {
- f.Close()
- return nil, fmt.Errorf("cannot determine absolute path for %q: %s", compressedFilePath, err)
- }
-
- flock := getFileLock(absPath)
- flock.Lock()
- ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath)
- flock.Unlock()
-
- return ff, err
-}
-
-func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string) (*fsFile, error) {
- // Attempt to open compressed file created by another concurrent
- // goroutine.
- // It is safe opening such a file, since the file creation
- // is guarded by file mutex - see getFileLock call.
- if _, err := os.Stat(compressedFilePath); err == nil {
- f.Close()
- return h.newCompressedFSFile(compressedFilePath)
- }
-
- // Create temporary file, so concurrent goroutines don't use
- // it until it is created.
- tmpFilePath := compressedFilePath + ".tmp"
- zf, err := os.Create(tmpFilePath)
- if err != nil {
- f.Close()
- if !os.IsPermission(err) {
- return nil, fmt.Errorf("cannot create temporary file %q: %s", tmpFilePath, err)
- }
- return nil, errNoCreatePermission
- }
-
- zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression)
- _, err = copyZeroAlloc(zw, f)
- if err1 := zw.Flush(); err == nil {
- err = err1
- }
- releaseStacklessGzipWriter(zw, CompressDefaultCompression)
- zf.Close()
- f.Close()
- if err != nil {
- return nil, fmt.Errorf("error when compressing file %q to %q: %s", filePath, tmpFilePath, err)
- }
- if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil {
- return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s",
- fileInfo.ModTime(), tmpFilePath, err)
- }
- if err = os.Rename(tmpFilePath, compressedFilePath); err != nil {
- return nil, fmt.Errorf("cannot move compressed file from %q to %q: %s", tmpFilePath, compressedFilePath, err)
- }
- return h.newCompressedFSFile(compressedFilePath)
-}
-
-func (h *fsHandler) newCompressedFSFile(filePath string) (*fsFile, error) {
- f, err := os.Open(filePath)
- if err != nil {
- return nil, fmt.Errorf("cannot open compressed file %q: %s", filePath, err)
- }
- fileInfo, err := f.Stat()
- if err != nil {
- f.Close()
- return nil, fmt.Errorf("cannot obtain info for compressed file %q: %s", filePath, err)
- }
- return h.newFSFile(f, fileInfo, true)
-}
-
-func (h *fsHandler) openFSFile(filePath string, mustCompress bool) (*fsFile, error) {
- filePathOriginal := filePath
- if mustCompress {
- filePath += h.compressedFileSuffix
- }
-
- f, err := os.Open(filePath)
- if err != nil {
- if mustCompress && os.IsNotExist(err) {
- return h.compressAndOpenFSFile(filePathOriginal)
- }
- return nil, err
- }
-
- fileInfo, err := f.Stat()
- if err != nil {
- f.Close()
- return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err)
- }
-
- if fileInfo.IsDir() {
- f.Close()
- if mustCompress {
- return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q",
- filePath, h.compressedFileSuffix)
- }
- return nil, errDirIndexRequired
- }
-
- if mustCompress {
- fileInfoOriginal, err := os.Stat(filePathOriginal)
- if err != nil {
- f.Close()
- return nil, fmt.Errorf("cannot obtain info for original file %q: %s", filePathOriginal, err)
- }
-
- if fileInfoOriginal.ModTime() != fileInfo.ModTime() {
- // The compressed file became stale. Re-create it.
- f.Close()
- os.Remove(filePath)
- return h.compressAndOpenFSFile(filePathOriginal)
- }
- }
-
- return h.newFSFile(f, fileInfo, mustCompress)
-}
-
-func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) {
- n := fileInfo.Size()
- contentLength := int(n)
- if n != int64(contentLength) {
- f.Close()
- return nil, fmt.Errorf("too big file: %d bytes", n)
- }
-
- // detect content-type
- ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffix)
- contentType := mime.TypeByExtension(ext)
- if len(contentType) == 0 {
- data, err := readFileHeader(f, compressed)
- if err != nil {
- return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err)
- }
- contentType = http.DetectContentType(data)
- }
-
- lastModified := fileInfo.ModTime()
- ff := &fsFile{
- h: h,
- f: f,
- contentType: contentType,
- contentLength: contentLength,
- compressed: compressed,
- lastModified: lastModified,
- lastModifiedStr: AppendHTTPDate(nil, lastModified),
-
- t: time.Now(),
- }
- return ff, nil
-}
-
-func readFileHeader(f *os.File, compressed bool) ([]byte, error) {
- r := io.Reader(f)
- var zr *gzip.Reader
- if compressed {
- var err error
- if zr, err = acquireGzipReader(f); err != nil {
- return nil, err
- }
- r = zr
- }
-
- lr := &io.LimitedReader{
- R: r,
- N: 512,
- }
- data, err := ioutil.ReadAll(lr)
- f.Seek(0, 0)
-
- if zr != nil {
- releaseGzipReader(zr)
- }
-
- return data, err
-}
-
-func stripLeadingSlashes(path []byte, stripSlashes int) []byte {
- for stripSlashes > 0 && len(path) > 0 {
- if path[0] != '/' {
- panic("BUG: path must start with slash")
- }
- n := bytes.IndexByte(path[1:], '/')
- if n < 0 {
- path = path[:0]
- break
- }
- path = path[n+1:]
- stripSlashes--
- }
- return path
-}
-
-func stripTrailingSlashes(path []byte) []byte {
- for len(path) > 0 && path[len(path)-1] == '/' {
- path = path[:len(path)-1]
- }
- return path
-}
-
-func fileExtension(path string, compressed bool, compressedFileSuffix string) string {
- if compressed && strings.HasSuffix(path, compressedFileSuffix) {
- path = path[:len(path)-len(compressedFileSuffix)]
- }
- n := strings.LastIndexByte(path, '.')
- if n < 0 {
- return ""
- }
- return path[n:]
-}
-
-// FileLastModified returns last modified time for the file.
-func FileLastModified(path string) (time.Time, error) {
- f, err := os.Open(path)
- if err != nil {
- return zeroTime, err
- }
- fileInfo, err := f.Stat()
- f.Close()
- if err != nil {
- return zeroTime, err
- }
- return fsModTime(fileInfo.ModTime()), nil
-}
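A quick sketch of the helper's use (the path is illustrative, and "log" is assumed to be imported):

lastModified, err := fasthttp.FileLastModified("/var/www/index.html")
if err != nil {
	log.Fatalf("cannot stat file: %s", err)
}
// lastModified is in UTC, truncated to seconds, which is the
// granularity of the HTTP Last-Modified header.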
-
-func fsModTime(t time.Time) time.Time {
- return t.In(time.UTC).Truncate(time.Second)
-}
-
-var (
- filesLockMap = make(map[string]*sync.Mutex)
- filesLockMapLock sync.Mutex
-)
-
-func getFileLock(absPath string) *sync.Mutex {
- filesLockMapLock.Lock()
- flock := filesLockMap[absPath]
- if flock == nil {
- flock = &sync.Mutex{}
- filesLockMap[absPath] = flock
- }
- filesLockMapLock.Unlock()
- return flock
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/header.go b/vendor/github.com/VictoriaMetrics/fasthttp/header.go
deleted file mode 100644
index 711329180..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/header.go
+++ /dev/null
@@ -1,2037 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "sync/atomic"
- "time"
-)
-
-// ResponseHeader represents HTTP response header.
-//
-// Copying ResponseHeader instances is forbidden.
-// Create new instances instead and use CopyTo.
-//
-// ResponseHeader instance MUST NOT be used from concurrently running
-// goroutines.
-type ResponseHeader struct {
- noCopy noCopy
-
- noHTTP11 bool
- connectionClose bool
-
- statusCode int
- contentLength int
- contentLengthBytes []byte
-
- contentType []byte
- server []byte
-
- h []argsKV
- bufKV argsKV
-
- cookies []argsKV
-}
-
-// RequestHeader represents HTTP request header.
-//
-// Copying RequestHeader instances is forbidden.
-// Create new instances instead and use CopyTo.
-//
-// RequestHeader instance MUST NOT be used from concurrently running
-// goroutines.
-type RequestHeader struct {
- noCopy noCopy
-
- noHTTP11 bool
- connectionClose bool
- isGet bool
-
- // These two fields have been moved close to other bool fields
- // for reducing RequestHeader object size.
- cookiesCollected bool
- rawHeadersParsed bool
-
- contentLength int
- contentLengthBytes []byte
-
- method []byte
- requestURI []byte
- host []byte
- contentType []byte
- userAgent []byte
-
- h []argsKV
- bufKV argsKV
-
- cookies []argsKV
-
- rawHeaders []byte
-}
-
-// SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength'
-// header.
-func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) {
- b := h.bufKV.value[:0]
- b = append(b, strBytes...)
- b = append(b, ' ')
- b = AppendUint(b, startPos)
- b = append(b, '-')
- b = AppendUint(b, endPos)
- b = append(b, '/')
- b = AppendUint(b, contentLength)
- h.bufKV.value = b
-
- h.SetCanonical(strContentRange, h.bufKV.value)
-}
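For example, a 206 response for the first 500 bytes of a 10000-byte resource could be assembled like this (a sketch using only methods declared in this file):

var h fasthttp.ResponseHeader
h.SetStatusCode(fasthttp.StatusPartialContent)
h.SetContentRange(0, 499, 10000) // Content-Range: bytes 0-499/10000
h.SetContentLength(500)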
-
-// SetByteRange sets 'Range: bytes=startPos-endPos' header.
-//
-// - If startPos is negative, then 'bytes=-startPos' value is set.
-// - If endPos is negative, then 'bytes=startPos-' value is set.
-func (h *RequestHeader) SetByteRange(startPos, endPos int) {
- h.parseRawHeaders()
-
- b := h.bufKV.value[:0]
- b = append(b, strBytes...)
- b = append(b, '=')
- if startPos >= 0 {
- b = AppendUint(b, startPos)
- } else {
- endPos = -startPos
- }
- b = append(b, '-')
- if endPos >= 0 {
- b = AppendUint(b, endPos)
- }
- h.bufKV.value = b
-
- h.SetCanonical(strRange, h.bufKV.value)
-}
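The three forms described in the comment above, spelled out as a sketch:

var h fasthttp.RequestHeader
h.SetByteRange(0, 499)   // Range: bytes=0-499
h.SetByteRange(9500, -1) // Range: bytes=9500-
h.SetByteRange(-500, 0)  // Range: bytes=-500 (the second argument is ignored)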
-
-// StatusCode returns response status code.
-func (h *ResponseHeader) StatusCode() int {
- if h.statusCode == 0 {
- return StatusOK
- }
- return h.statusCode
-}
-
-// SetStatusCode sets response status code.
-func (h *ResponseHeader) SetStatusCode(statusCode int) {
- h.statusCode = statusCode
-}
-
-// SetLastModified sets 'Last-Modified' header to the given value.
-func (h *ResponseHeader) SetLastModified(t time.Time) {
- h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t)
- h.SetCanonical(strLastModified, h.bufKV.value)
-}
-
-// ConnectionClose returns true if 'Connection: close' header is set.
-func (h *ResponseHeader) ConnectionClose() bool {
- return h.connectionClose
-}
-
-// SetConnectionClose sets 'Connection: close' header.
-func (h *ResponseHeader) SetConnectionClose() {
- h.connectionClose = true
-}
-
-// ResetConnectionClose clears 'Connection: close' header if it exists.
-func (h *ResponseHeader) ResetConnectionClose() {
- if h.connectionClose {
- h.connectionClose = false
- h.h = delAllArgsBytes(h.h, strConnection)
- }
-}
-
-// ConnectionClose returns true if 'Connection: close' header is set.
-func (h *RequestHeader) ConnectionClose() bool {
- h.parseRawHeaders()
- return h.connectionClose
-}
-
-func (h *RequestHeader) connectionCloseFast() bool {
- // h.parseRawHeaders() isn't called for performance reasons.
- // Use ConnectionClose for triggering raw headers parsing.
- return h.connectionClose
-}
-
-// SetConnectionClose sets 'Connection: close' header.
-func (h *RequestHeader) SetConnectionClose() {
- // h.parseRawHeaders() isn't called for performance reasons.
- h.connectionClose = true
-}
-
-// ResetConnectionClose clears 'Connection: close' header if it exists.
-func (h *RequestHeader) ResetConnectionClose() {
- h.parseRawHeaders()
- if h.connectionClose {
- h.connectionClose = false
- h.h = delAllArgsBytes(h.h, strConnection)
- }
-}
-
-// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set.
-func (h *ResponseHeader) ConnectionUpgrade() bool {
- return hasHeaderValue(h.Peek("Connection"), strUpgrade)
-}
-
-// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set.
-func (h *RequestHeader) ConnectionUpgrade() bool {
- h.parseRawHeaders()
- return hasHeaderValue(h.Peek("Connection"), strUpgrade)
-}
-
-// ContentLength returns Content-Length header value.
-//
-// It may be negative:
-// -1 means Transfer-Encoding: chunked.
-// -2 means Transfer-Encoding: identity.
-func (h *ResponseHeader) ContentLength() int {
- return h.contentLength
-}
-
-// SetContentLength sets Content-Length header value.
-//
-// Content-Length may be negative:
-// -1 means Transfer-Encoding: chunked.
-// -2 means Transfer-Encoding: identity.
-func (h *ResponseHeader) SetContentLength(contentLength int) {
- if h.mustSkipContentLength() {
- return
- }
- h.contentLength = contentLength
- if contentLength >= 0 {
- h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength)
- h.h = delAllArgsBytes(h.h, strTransferEncoding)
- } else {
- h.contentLengthBytes = h.contentLengthBytes[:0]
- value := strChunked
- if contentLength == -2 {
- h.SetConnectionClose()
- value = strIdentity
- }
- h.h = setArgBytes(h.h, strTransferEncoding, value)
- }
-}
-
-func (h *ResponseHeader) mustSkipContentLength() bool {
- // From http/1.1 specs:
- // All 1xx (informational), 204 (no content), and 304 (not modified) responses MUST NOT include a message-body
- statusCode := h.StatusCode()
-
- // Fast path.
- if statusCode < 100 || statusCode == StatusOK {
- return false
- }
-
- // Slow path.
- return statusCode == StatusNotModified || statusCode == StatusNoContent || statusCode < 200
-}
-
-// ContentLength returns Content-Length header value.
-//
-// It may be negative:
-// -1 means Transfer-Encoding: chunked.
-func (h *RequestHeader) ContentLength() int {
- if h.noBody() {
- return 0
- }
- h.parseRawHeaders()
- return h.contentLength
-}
-
-// SetContentLength sets Content-Length header value.
-//
-// Negative content-length sets 'Transfer-Encoding: chunked' header.
-func (h *RequestHeader) SetContentLength(contentLength int) {
- h.parseRawHeaders()
- h.contentLength = contentLength
- if contentLength >= 0 {
- h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength)
- h.h = delAllArgsBytes(h.h, strTransferEncoding)
- } else {
- h.contentLengthBytes = h.contentLengthBytes[:0]
- h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
- }
-}
-
-func (h *ResponseHeader) isCompressibleContentType() bool {
- contentType := h.ContentType()
- return bytes.HasPrefix(contentType, strTextSlash) ||
- bytes.HasPrefix(contentType, strApplicationSlash)
-}
-
-// ContentType returns Content-Type header value.
-func (h *ResponseHeader) ContentType() []byte {
- contentType := h.contentType
- if len(h.contentType) == 0 {
- contentType = defaultContentType
- }
- return contentType
-}
-
-// SetContentType sets Content-Type header value.
-func (h *ResponseHeader) SetContentType(contentType string) {
- h.contentType = append(h.contentType[:0], contentType...)
-}
-
-// SetContentTypeBytes sets Content-Type header value.
-func (h *ResponseHeader) SetContentTypeBytes(contentType []byte) {
- h.contentType = append(h.contentType[:0], contentType...)
-}
-
-// Server returns Server header value.
-func (h *ResponseHeader) Server() []byte {
- return h.server
-}
-
-// SetServer sets Server header value.
-func (h *ResponseHeader) SetServer(server string) {
- h.server = append(h.server[:0], server...)
-}
-
-// SetServerBytes sets Server header value.
-func (h *ResponseHeader) SetServerBytes(server []byte) {
- h.server = append(h.server[:0], server...)
-}
-
-// ContentType returns Content-Type header value.
-func (h *RequestHeader) ContentType() []byte {
- h.parseRawHeaders()
- return h.contentType
-}
-
-// SetContentType sets Content-Type header value.
-func (h *RequestHeader) SetContentType(contentType string) {
- h.parseRawHeaders()
- h.contentType = append(h.contentType[:0], contentType...)
-}
-
-// SetContentTypeBytes sets Content-Type header value.
-func (h *RequestHeader) SetContentTypeBytes(contentType []byte) {
- h.parseRawHeaders()
- h.contentType = append(h.contentType[:0], contentType...)
-}
-
-// SetMultipartFormBoundary sets the following Content-Type:
-// 'multipart/form-data; boundary=...'
-// where ... is substituted by the given boundary.
-func (h *RequestHeader) SetMultipartFormBoundary(boundary string) {
- h.parseRawHeaders()
-
- b := h.bufKV.value[:0]
- b = append(b, strMultipartFormData...)
- b = append(b, ';', ' ')
- b = append(b, strBoundary...)
- b = append(b, '=')
- b = append(b, boundary...)
- h.bufKV.value = b
-
- h.SetContentTypeBytes(h.bufKV.value)
-}
-
-// SetMultipartFormBoundaryBytes sets the following Content-Type:
-// 'multipart/form-data; boundary=...'
-// where ... is substituted by the given boundary.
-func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) {
- h.parseRawHeaders()
-
- b := h.bufKV.value[:0]
- b = append(b, strMultipartFormData...)
- b = append(b, ';', ' ')
- b = append(b, strBoundary...)
- b = append(b, '=')
- b = append(b, boundary...)
- h.bufKV.value = b
-
- h.SetContentTypeBytes(h.bufKV.value)
-}
-
-// MultipartFormBoundary returns boundary part
-// from 'multipart/form-data; boundary=...' Content-Type.
-func (h *RequestHeader) MultipartFormBoundary() []byte {
- b := h.ContentType()
- if !bytes.HasPrefix(b, strMultipartFormData) {
- return nil
- }
- b = b[len(strMultipartFormData):]
- if len(b) == 0 || b[0] != ';' {
- return nil
- }
-
- var n int
- for len(b) > 0 {
- n++
- for len(b) > n && b[n] == ' ' {
- n++
- }
- b = b[n:]
- if !bytes.HasPrefix(b, strBoundary) {
- if n = bytes.IndexByte(b, ';'); n < 0 {
- return nil
- }
- continue
- }
-
- b = b[len(strBoundary):]
- if len(b) == 0 || b[0] != '=' {
- return nil
- }
- b = b[1:]
- if n = bytes.IndexByte(b, ';'); n >= 0 {
- b = b[:n]
- }
- if len(b) > 1 && b[0] == '"' && b[len(b)-1] == '"' {
- b = b[1 : len(b)-1]
- }
- return b
- }
- return nil
-}
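Setter and getter round-trip, as a sketch:

var h fasthttp.RequestHeader
h.SetMultipartFormBoundary("my-boundary")
// Content-Type: multipart/form-data; boundary=my-boundary
boundary := h.MultipartFormBoundary() // []byte("my-boundary")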
-
-// Host returns Host header value.
-func (h *RequestHeader) Host() []byte {
- if len(h.host) > 0 {
- return h.host
- }
- if !h.rawHeadersParsed {
- // fast path without employing full headers parsing.
- host := peekRawHeader(h.rawHeaders, strHost)
- if len(host) > 0 {
- h.host = append(h.host[:0], host...)
- return h.host
- }
- }
-
- // slow path.
- h.parseRawHeaders()
- return h.host
-}
-
-// SetHost sets Host header value.
-func (h *RequestHeader) SetHost(host string) {
- h.parseRawHeaders()
- h.host = append(h.host[:0], host...)
-}
-
-// SetHostBytes sets Host header value.
-func (h *RequestHeader) SetHostBytes(host []byte) {
- h.parseRawHeaders()
- h.host = append(h.host[:0], host...)
-}
-
-// UserAgent returns User-Agent header value.
-func (h *RequestHeader) UserAgent() []byte {
- h.parseRawHeaders()
- return h.userAgent
-}
-
-// SetUserAgent sets User-Agent header value.
-func (h *RequestHeader) SetUserAgent(userAgent string) {
- h.parseRawHeaders()
- h.userAgent = append(h.userAgent[:0], userAgent...)
-}
-
-// SetUserAgentBytes sets User-Agent header value.
-func (h *RequestHeader) SetUserAgentBytes(userAgent []byte) {
- h.parseRawHeaders()
- h.userAgent = append(h.userAgent[:0], userAgent...)
-}
-
-// Referer returns Referer header value.
-func (h *RequestHeader) Referer() []byte {
- return h.PeekBytes(strReferer)
-}
-
-// SetReferer sets Referer header value.
-func (h *RequestHeader) SetReferer(referer string) {
- h.SetBytesK(strReferer, referer)
-}
-
-// SetRefererBytes sets Referer header value.
-func (h *RequestHeader) SetRefererBytes(referer []byte) {
- h.SetCanonical(strReferer, referer)
-}
-
-// Method returns HTTP request method.
-func (h *RequestHeader) Method() []byte {
- if len(h.method) == 0 {
- return strGet
- }
- return h.method
-}
-
-// SetMethod sets HTTP request method.
-func (h *RequestHeader) SetMethod(method string) {
- h.method = append(h.method[:0], method...)
-}
-
-// SetMethodBytes sets HTTP request method.
-func (h *RequestHeader) SetMethodBytes(method []byte) {
- h.method = append(h.method[:0], method...)
-}
-
-// RequestURI returns RequestURI from the first HTTP request line.
-func (h *RequestHeader) RequestURI() []byte {
- requestURI := h.requestURI
- if len(requestURI) == 0 {
- requestURI = strSlash
- }
- return requestURI
-}
-
-// SetRequestURI sets RequestURI for the first HTTP request line.
-// RequestURI must be properly encoded.
-// Use URI.RequestURI for constructing proper RequestURI if unsure.
-func (h *RequestHeader) SetRequestURI(requestURI string) {
- h.requestURI = append(h.requestURI[:0], requestURI...)
-}
-
-// SetRequestURIBytes sets RequestURI for the first HTTP request line.
-// RequestURI must be properly encoded.
-// Use URI.RequestURI for constructing proper RequestURI if unsure.
-func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) {
- h.requestURI = append(h.requestURI[:0], requestURI...)
-}
-
-// IsGet returns true if request method is GET.
-func (h *RequestHeader) IsGet() bool {
- // Optimize fast path for GET requests.
- if !h.isGet {
- h.isGet = bytes.Equal(h.Method(), strGet)
- }
- return h.isGet
-}
-
-// IsPost returns true if request method is POST.
-func (h *RequestHeader) IsPost() bool {
- return bytes.Equal(h.Method(), strPost)
-}
-
-// IsPut returns true if request method is PUT.
-func (h *RequestHeader) IsPut() bool {
- return bytes.Equal(h.Method(), strPut)
-}
-
-// IsHead returns true if request method is HEAD.
-func (h *RequestHeader) IsHead() bool {
- // Fast path
- if h.isGet {
- return false
- }
- return bytes.Equal(h.Method(), strHead)
-}
-
-// IsDelete returns true if request method is DELETE.
-func (h *RequestHeader) IsDelete() bool {
- return bytes.Equal(h.Method(), strDelete)
-}
-
-// IsHTTP11 returns true if the request is HTTP/1.1.
-func (h *RequestHeader) IsHTTP11() bool {
- return !h.noHTTP11
-}
-
-// IsHTTP11 returns true if the response is HTTP/1.1.
-func (h *ResponseHeader) IsHTTP11() bool {
- return !h.noHTTP11
-}
-
-// HasAcceptEncoding returns true if the header contains
-// the given Accept-Encoding value.
-func (h *RequestHeader) HasAcceptEncoding(acceptEncoding string) bool {
- h.bufKV.value = append(h.bufKV.value[:0], acceptEncoding...)
- return h.HasAcceptEncodingBytes(h.bufKV.value)
-}
-
-// HasAcceptEncodingBytes returns true if the header contains
-// the given Accept-Encoding value.
-func (h *RequestHeader) HasAcceptEncodingBytes(acceptEncoding []byte) bool {
- ae := h.peek(strAcceptEncoding)
- n := bytes.Index(ae, acceptEncoding)
- if n < 0 {
- return false
- }
- b := ae[n+len(acceptEncoding):]
- if len(b) > 0 && b[0] != ',' {
- return false
- }
- if n == 0 {
- return true
- }
- return ae[n-1] == ' '
-}
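The matching is substring-based with delimiter checks on both sides, so only whole encoding tokens match. A sketch:

var h fasthttp.RequestHeader
h.Set("Accept-Encoding", "gzip, deflate, br")
h.HasAcceptEncoding("gzip")    // true
h.HasAcceptEncoding("deflate") // true: preceded by a space, followed by ','
h.HasAcceptEncoding("zstd")    // false: not present in the header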
-
-// Len returns the number of headers set,
-// i.e. the number of times f is called in VisitAll.
-func (h *ResponseHeader) Len() int {
- n := 0
- h.VisitAll(func(k, v []byte) { n++ })
- return n
-}
-
-// Len returns the number of headers set,
-// i.e. the number of times f is called in VisitAll.
-func (h *RequestHeader) Len() int {
- n := 0
- h.VisitAll(func(k, v []byte) { n++ })
- return n
-}
-
-// Reset clears response header.
-func (h *ResponseHeader) Reset() {
- h.noHTTP11 = false
- h.connectionClose = false
-
- h.statusCode = 0
- h.contentLength = 0
- h.contentLengthBytes = h.contentLengthBytes[:0]
-
- h.contentType = h.contentType[:0]
- h.server = h.server[:0]
-
- h.h = h.h[:0]
- h.cookies = h.cookies[:0]
-}
-
-// Reset clears request header.
-func (h *RequestHeader) Reset() {
- h.noHTTP11 = false
- h.connectionClose = false
- h.isGet = false
-
- h.contentLength = 0
- h.contentLengthBytes = h.contentLengthBytes[:0]
-
- h.method = h.method[:0]
- h.requestURI = h.requestURI[:0]
- h.host = h.host[:0]
- h.contentType = h.contentType[:0]
- h.userAgent = h.userAgent[:0]
-
- h.h = h.h[:0]
- h.cookies = h.cookies[:0]
- h.cookiesCollected = false
-
- h.rawHeaders = h.rawHeaders[:0]
- h.rawHeadersParsed = false
-}
-
-// CopyTo copies all the headers to dst.
-func (h *ResponseHeader) CopyTo(dst *ResponseHeader) {
- dst.Reset()
-
- dst.noHTTP11 = h.noHTTP11
- dst.connectionClose = h.connectionClose
-
- dst.statusCode = h.statusCode
- dst.contentLength = h.contentLength
- dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...)
- dst.contentType = append(dst.contentType[:0], h.contentType...)
- dst.server = append(dst.server[:0], h.server...)
- dst.h = copyArgs(dst.h, h.h)
- dst.cookies = copyArgs(dst.cookies, h.cookies)
-}
-
-// CopyTo copies all the headers to dst.
-func (h *RequestHeader) CopyTo(dst *RequestHeader) {
- dst.Reset()
-
- dst.noHTTP11 = h.noHTTP11
- dst.connectionClose = h.connectionClose
- dst.isGet = h.isGet
-
- dst.contentLength = h.contentLength
- dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...)
- dst.method = append(dst.method[:0], h.method...)
- dst.requestURI = append(dst.requestURI[:0], h.requestURI...)
- dst.host = append(dst.host[:0], h.host...)
- dst.contentType = append(dst.contentType[:0], h.contentType...)
- dst.userAgent = append(dst.userAgent[:0], h.userAgent...)
- dst.h = copyArgs(dst.h, h.h)
- dst.cookies = copyArgs(dst.cookies, h.cookies)
- dst.cookiesCollected = h.cookiesCollected
- dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...)
- dst.rawHeadersParsed = h.rawHeadersParsed
-}
-
-// VisitAll calls f for each header.
-//
-// f must not retain references to key and/or value after returning.
-// Copy key and/or value contents before returning if you need to retain them.
-func (h *ResponseHeader) VisitAll(f func(key, value []byte)) {
- if len(h.contentLengthBytes) > 0 {
- f(strContentLength, h.contentLengthBytes)
- }
- contentType := h.ContentType()
- if len(contentType) > 0 {
- f(strContentType, contentType)
- }
- server := h.Server()
- if len(server) > 0 {
- f(strServer, server)
- }
- if len(h.cookies) > 0 {
- visitArgs(h.cookies, func(k, v []byte) {
- f(strSetCookie, v)
- })
- }
- visitArgs(h.h, f)
- if h.ConnectionClose() {
- f(strConnection, strClose)
- }
-}
-
-// VisitAllCookie calls f for each response cookie.
-//
-// Cookie name is passed in key and the whole Set-Cookie header value
-// is passed in value on each f invocation. Value may be parsed
-// with Cookie.ParseBytes().
-//
-// f must not retain references to key and/or value after returning.
-func (h *ResponseHeader) VisitAllCookie(f func(key, value []byte)) {
- visitArgs(h.cookies, f)
-}
-
-// VisitAllCookie calls f for each request cookie.
-//
-// f must not retain references to key and/or value after returning.
-func (h *RequestHeader) VisitAllCookie(f func(key, value []byte)) {
- h.parseRawHeaders()
- h.collectCookies()
- visitArgs(h.cookies, f)
-}
-
-// VisitAll calls f for each header.
-//
-// f must not retain references to key and/or value after returning.
-// Copy key and/or value contents before returning if you need to retain them.
-func (h *RequestHeader) VisitAll(f func(key, value []byte)) {
- h.parseRawHeaders()
- host := h.Host()
- if len(host) > 0 {
- f(strHost, host)
- }
- if len(h.contentLengthBytes) > 0 {
- f(strContentLength, h.contentLengthBytes)
- }
- contentType := h.ContentType()
- if len(contentType) > 0 {
- f(strContentType, contentType)
- }
- userAgent := h.UserAgent()
- if len(userAgent) > 0 {
- f(strUserAgent, userAgent)
- }
-
- h.collectCookies()
- if len(h.cookies) > 0 {
- h.bufKV.value = appendRequestCookieBytes(h.bufKV.value[:0], h.cookies)
- f(strCookie, h.bufKV.value)
- }
- visitArgs(h.h, f)
- if h.ConnectionClose() {
- f(strConnection, strClose)
- }
-}
-
-// Del deletes header with the given key.
-func (h *ResponseHeader) Del(key string) {
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.del(k)
-}
-
-// DelBytes deletes header with the given key.
-func (h *ResponseHeader) DelBytes(key []byte) {
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- h.del(h.bufKV.key)
-}
-
-func (h *ResponseHeader) del(key []byte) {
- switch string(key) {
- case "Content-Type":
- h.contentType = h.contentType[:0]
- case "Server":
- h.server = h.server[:0]
- case "Set-Cookie":
- h.cookies = h.cookies[:0]
- case "Content-Length":
- h.contentLength = 0
- h.contentLengthBytes = h.contentLengthBytes[:0]
- case "Connection":
- h.connectionClose = false
- }
- h.h = delAllArgsBytes(h.h, key)
-}
-
-// Del deletes header with the given key.
-func (h *RequestHeader) Del(key string) {
- h.parseRawHeaders()
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.del(k)
-}
-
-// DelBytes deletes header with the given key.
-func (h *RequestHeader) DelBytes(key []byte) {
- h.parseRawHeaders()
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- h.del(h.bufKV.key)
-}
-
-func (h *RequestHeader) del(key []byte) {
- switch string(key) {
- case "Host":
- h.host = h.host[:0]
- case "Content-Type":
- h.contentType = h.contentType[:0]
- case "User-Agent":
- h.userAgent = h.userAgent[:0]
- case "Cookie":
- h.cookies = h.cookies[:0]
- case "Content-Length":
- h.contentLength = 0
- h.contentLengthBytes = h.contentLengthBytes[:0]
- case "Connection":
- h.connectionClose = false
- }
- h.h = delAllArgsBytes(h.h, key)
-}
-
-// Add adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use Set for setting a single header for the given key.
-func (h *ResponseHeader) Add(key, value string) {
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.h = appendArg(h.h, b2s(k), value)
-}
-
-// AddBytesK adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesK for setting a single header for the given key.
-func (h *ResponseHeader) AddBytesK(key []byte, value string) {
- h.Add(b2s(key), value)
-}
-
-// AddBytesV adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesV for setting a single header for the given key.
-func (h *ResponseHeader) AddBytesV(key string, value []byte) {
- h.Add(key, b2s(value))
-}
-
-// AddBytesKV adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesKV for setting a single header for the given key.
-func (h *ResponseHeader) AddBytesKV(key, value []byte) {
- h.Add(b2s(key), b2s(value))
-}
-
-// Set sets the given 'key: value' header.
-//
-// Use Add for setting multiple header values under the same key.
-func (h *ResponseHeader) Set(key, value string) {
- initHeaderKV(&h.bufKV, key, value)
- h.SetCanonical(h.bufKV.key, h.bufKV.value)
-}
-
-// SetBytesK sets the given 'key: value' header.
-//
-// Use AddBytesK for setting multiple header values under the same key.
-func (h *ResponseHeader) SetBytesK(key []byte, value string) {
- h.bufKV.value = append(h.bufKV.value[:0], value...)
- h.SetBytesKV(key, h.bufKV.value)
-}
-
-// SetBytesV sets the given 'key: value' header.
-//
-// Use AddBytesV for setting multiple header values under the same key.
-func (h *ResponseHeader) SetBytesV(key string, value []byte) {
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.SetCanonical(k, value)
-}
-
-// SetBytesKV sets the given 'key: value' header.
-//
-// Use AddBytesKV for setting multiple header values under the same key.
-func (h *ResponseHeader) SetBytesKV(key, value []byte) {
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- h.SetCanonical(h.bufKV.key, value)
-}
-
-// SetCanonical sets the given 'key: value' header assuming that
-// key is in canonical form.
-func (h *ResponseHeader) SetCanonical(key, value []byte) {
- switch string(key) {
- case "Content-Type":
- h.SetContentTypeBytes(value)
- case "Server":
- h.SetServerBytes(value)
- case "Set-Cookie":
- var kv *argsKV
- h.cookies, kv = allocArg(h.cookies)
- kv.key = getCookieKey(kv.key, value)
- kv.value = append(kv.value[:0], value...)
- case "Content-Length":
- if contentLength, err := parseContentLength(value); err == nil {
- h.contentLength = contentLength
- h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
- }
- case "Connection":
- if bytes.Equal(strClose, value) {
- h.SetConnectionClose()
- } else {
- h.ResetConnectionClose()
- h.h = setArgBytes(h.h, key, value)
- }
- case "Transfer-Encoding":
- // Transfer-Encoding is managed automatically.
- case "Date":
- // Date is managed automatically.
- default:
- h.h = setArgBytes(h.h, key, value)
- }
-}
-
-// SetCookie sets the given response cookie.
-//
-// It is safe to re-use the cookie after the function returns.
-func (h *ResponseHeader) SetCookie(cookie *Cookie) {
- h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie())
-}
-
-// SetCookie sets 'key: value' cookies.
-func (h *RequestHeader) SetCookie(key, value string) {
- h.parseRawHeaders()
- h.collectCookies()
- h.cookies = setArg(h.cookies, key, value)
-}
-
-// SetCookieBytesK sets 'key: value' cookies.
-func (h *RequestHeader) SetCookieBytesK(key []byte, value string) {
- h.SetCookie(b2s(key), value)
-}
-
-// SetCookieBytesKV sets 'key: value' cookies.
-func (h *RequestHeader) SetCookieBytesKV(key, value []byte) {
- h.SetCookie(b2s(key), b2s(value))
-}
-
-// DelClientCookie instructs the client to remove the given cookie.
-//
-// Use DelCookie if you just want to remove the cookie from the response header.
-func (h *ResponseHeader) DelClientCookie(key string) {
- h.DelCookie(key)
-
- c := AcquireCookie()
- c.SetKey(key)
- c.SetExpire(CookieExpireDelete)
- h.SetCookie(c)
- ReleaseCookie(c)
-}
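A sketch of the difference: DelCookie only edits the response being built, while DelClientCookie also tells the browser to expire its copy:

var h fasthttp.ResponseHeader
h.DelCookie("sessionid")       // drop the cookie from this response only
h.DelClientCookie("sessionid") // additionally send Set-Cookie with an expiry in the past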
-
-// DelClientCookieBytes instructs the client to remove the given cookie.
-//
-// Use DelCookieBytes if you just want to remove the cookie from the response header.
-func (h *ResponseHeader) DelClientCookieBytes(key []byte) {
- h.DelClientCookie(b2s(key))
-}
-
-// DelCookie removes cookie under the given key from response header.
-//
-// Note that DelCookie doesn't remove the cookie from the client.
-// Use DelClientCookie instead.
-func (h *ResponseHeader) DelCookie(key string) {
- h.cookies = delAllArgs(h.cookies, key)
-}
-
-// DelCookieBytes removes cookie under the given key from response header.
-//
-// Note that DelCookieBytes doesn't remove the cookie from the client.
-// Use DelClientCookieBytes instead.
-func (h *ResponseHeader) DelCookieBytes(key []byte) {
- h.DelCookie(b2s(key))
-}
-
-// DelCookie removes cookie under the given key.
-func (h *RequestHeader) DelCookie(key string) {
- h.parseRawHeaders()
- h.collectCookies()
- h.cookies = delAllArgs(h.cookies, key)
-}
-
-// DelCookieBytes removes cookie under the given key.
-func (h *RequestHeader) DelCookieBytes(key []byte) {
- h.DelCookie(b2s(key))
-}
-
-// DelAllCookies removes all the cookies from response headers.
-func (h *ResponseHeader) DelAllCookies() {
- h.cookies = h.cookies[:0]
-}
-
-// DelAllCookies removes all the cookies from request headers.
-func (h *RequestHeader) DelAllCookies() {
- h.parseRawHeaders()
- h.collectCookies()
- h.cookies = h.cookies[:0]
-}
-
-// Add adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use Set for setting a single header for the given key.
-func (h *RequestHeader) Add(key, value string) {
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.h = appendArg(h.h, b2s(k), value)
-}
-
-// AddBytesK adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesK for setting a single header for the given key.
-func (h *RequestHeader) AddBytesK(key []byte, value string) {
- h.Add(b2s(key), value)
-}
-
-// AddBytesV adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesV for setting a single header for the given key.
-func (h *RequestHeader) AddBytesV(key string, value []byte) {
- h.Add(key, b2s(value))
-}
-
-// AddBytesKV adds the given 'key: value' header.
-//
-// Multiple headers with the same key may be added with this function.
-// Use SetBytesKV for setting a single header for the given key.
-func (h *RequestHeader) AddBytesKV(key, value []byte) {
- h.Add(b2s(key), b2s(value))
-}
-
-// Set sets the given 'key: value' header.
-//
-// Use Add for setting multiple header values under the same key.
-func (h *RequestHeader) Set(key, value string) {
- initHeaderKV(&h.bufKV, key, value)
- h.SetCanonical(h.bufKV.key, h.bufKV.value)
-}
-
-// SetBytesK sets the given 'key: value' header.
-//
-// Use AddBytesK for setting multiple header values under the same key.
-func (h *RequestHeader) SetBytesK(key []byte, value string) {
- h.bufKV.value = append(h.bufKV.value[:0], value...)
- h.SetBytesKV(key, h.bufKV.value)
-}
-
-// SetBytesV sets the given 'key: value' header.
-//
-// Use AddBytesV for setting multiple header values under the same key.
-func (h *RequestHeader) SetBytesV(key string, value []byte) {
- k := getHeaderKeyBytes(&h.bufKV, key)
- h.SetCanonical(k, value)
-}
-
-// SetBytesKV sets the given 'key: value' header.
-//
-// Use AddBytesKV for setting multiple header values under the same key.
-func (h *RequestHeader) SetBytesKV(key, value []byte) {
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- h.SetCanonical(h.bufKV.key, value)
-}
-
-// SetCanonical sets the given 'key: value' header assuming that
-// key is in canonical form.
-func (h *RequestHeader) SetCanonical(key, value []byte) {
- h.parseRawHeaders()
- switch string(key) {
- case "Host":
- h.SetHostBytes(value)
- case "Content-Type":
- h.SetContentTypeBytes(value)
- case "User-Agent":
- h.SetUserAgentBytes(value)
- case "Cookie":
- h.collectCookies()
- h.cookies = parseRequestCookies(h.cookies, value)
- case "Content-Length":
- if contentLength, err := parseContentLength(value); err == nil {
- h.contentLength = contentLength
- h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
- }
- case "Connection":
- if bytes.Equal(strClose, value) {
- h.SetConnectionClose()
- } else {
- h.ResetConnectionClose()
- h.h = setArgBytes(h.h, key, value)
- }
- case "Transfer-Encoding":
- // Transfer-Encoding is managed automatically.
- default:
- h.h = setArgBytes(h.h, key, value)
- }
-}
-
-// Peek returns header value for the given key.
-//
-// Returned value is valid until the next call to ResponseHeader.
-// Do not store references to returned value. Make copies instead.
-func (h *ResponseHeader) Peek(key string) []byte {
- k := getHeaderKeyBytes(&h.bufKV, key)
- return h.peek(k)
-}
-
-// PeekBytes returns header value for the given key.
-//
-// Returned value is valid until the next call to ResponseHeader.
-// Do not store references to returned value. Make copies instead.
-func (h *ResponseHeader) PeekBytes(key []byte) []byte {
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- return h.peek(h.bufKV.key)
-}
-
-// Peek returns header value for the given key.
-//
-// Returned value is valid until the next call to RequestHeader.
-// Do not store references to returned value. Make copies instead.
-func (h *RequestHeader) Peek(key string) []byte {
- k := getHeaderKeyBytes(&h.bufKV, key)
- return h.peek(k)
-}
-
-// PeekBytes returns header value for the given key.
-//
-// Returned value is valid until the next call to RequestHeader.
-// Do not store references to returned value. Make copies instead.
-func (h *RequestHeader) PeekBytes(key []byte) []byte {
- h.bufKV.key = append(h.bufKV.key[:0], key...)
- normalizeHeaderKey(h.bufKV.key)
- return h.peek(h.bufKV.key)
-}
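
Since `Peek` returns a slice into the header's internal buffer, callers that keep the value past the next header mutation must copy it first; a short sketch:

```go
var h fasthttp.RequestHeader
h.Set("Content-Type", "application/json")

// ct aliases the header's internal buffer and is invalidated by
// subsequent header modifications, so take an owned copy to keep it.
ct := h.Peek("Content-Type")
owned := append([]byte(nil), ct...)
_ = owned
```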
-
-func (h *ResponseHeader) peek(key []byte) []byte {
- switch string(key) {
- case "Content-Type":
- return h.ContentType()
- case "Server":
- return h.Server()
- case "Connection":
- if h.ConnectionClose() {
- return strClose
- }
- return peekArgBytes(h.h, key)
- case "Content-Length":
- return h.contentLengthBytes
- default:
- return peekArgBytes(h.h, key)
- }
-}
-
-func (h *RequestHeader) peek(key []byte) []byte {
- h.parseRawHeaders()
- switch string(key) {
- case "Host":
- return h.Host()
- case "Content-Type":
- return h.ContentType()
- case "User-Agent":
- return h.UserAgent()
- case "Connection":
- if h.ConnectionClose() {
- return strClose
- }
- return peekArgBytes(h.h, key)
- case "Content-Length":
- return h.contentLengthBytes
- default:
- return peekArgBytes(h.h, key)
- }
-}
-
-// Cookie returns cookie for the given key.
-func (h *RequestHeader) Cookie(key string) []byte {
- h.parseRawHeaders()
- h.collectCookies()
- return peekArgStr(h.cookies, key)
-}
-
-// CookieBytes returns cookie for the given key.
-func (h *RequestHeader) CookieBytes(key []byte) []byte {
- h.parseRawHeaders()
- h.collectCookies()
- return peekArgBytes(h.cookies, key)
-}
-
-// Cookie fills cookie for the given cookie.Key.
-//
-// Returns false if cookie with the given cookie.Key is missing.
-func (h *ResponseHeader) Cookie(cookie *Cookie) bool {
- v := peekArgBytes(h.cookies, cookie.Key())
- if v == nil {
- return false
- }
- cookie.ParseBytes(v)
- return true
-}
-
-// Read reads response header from r.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (h *ResponseHeader) Read(r *bufio.Reader) error {
- n := 1
- for {
- err := h.tryRead(r, n)
- if err == nil {
- return nil
- }
- if err != errNeedMore {
- h.Reset()
- return err
- }
- n = r.Buffered() + 1
- }
-}
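
The loop above retries parsing with a progressively larger peek window until a full header block is buffered; callers only see the final result. A hypothetical helper reading just the header part of a response from a `net.Conn` might look like this (the `readResponseHeader` name is illustrative, not part of the package):

```go
// readResponseHeader consumes only the header block from conn;
// any body bytes remain buffered in the returned *bufio.Reader.
func readResponseHeader(conn net.Conn) (*bufio.Reader, *fasthttp.ResponseHeader, error) {
	br := bufio.NewReader(conn)
	h := &fasthttp.ResponseHeader{}
	if err := h.Read(br); err != nil {
		return nil, nil, fmt.Errorf("cannot read response headers: %s", err)
	}
	return br, h, nil
}
```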
-
-func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error {
- h.Reset()
- b, err := r.Peek(n)
- if len(b) == 0 {
- // treat all errors on the first byte read as EOF
- if n == 1 || err == io.EOF {
- return io.EOF
- }
-
- // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 .
- if err == bufio.ErrBufferFull {
- return &ErrSmallBuffer{
- error: fmt.Errorf("error when reading response headers: %s", errSmallBuffer),
- }
- }
-
- return fmt.Errorf("error when reading response headers: %s", err)
- }
- b = mustPeekBuffered(r)
- headersLen, errParse := h.parse(b)
- if errParse != nil {
- return headerError("response", err, errParse, b)
- }
- mustDiscard(r, headersLen)
- return nil
-}
-
-func headerError(typ string, err, errParse error, b []byte) error {
- if errParse != errNeedMore {
- return headerErrorMsg(typ, errParse, b)
- }
- if err == nil {
- return errNeedMore
- }
-
- // Buggy servers may leave trailing CRLFs after http body.
- // Treat this case as EOF.
- if isOnlyCRLF(b) {
- return io.EOF
- }
-
- if err != bufio.ErrBufferFull {
- return headerErrorMsg(typ, err, b)
- }
- return &ErrSmallBuffer{
- error: headerErrorMsg(typ, errSmallBuffer, b),
- }
-}
-
-func headerErrorMsg(typ string, err error, b []byte) error {
- return fmt.Errorf("error when reading %s headers: %s. Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b))
-}
-
-// Read reads request header from r.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (h *RequestHeader) Read(r *bufio.Reader) error {
- n := 1
- for {
- err := h.tryRead(r, n)
- if err == nil {
- return nil
- }
- if err != errNeedMore {
- h.Reset()
- return err
- }
- n = r.Buffered() + 1
- }
-}
-
-func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error {
- h.Reset()
- b, err := r.Peek(n)
- if len(b) == 0 {
- // treat all errors on the first byte read as EOF
- if n == 1 || err == io.EOF {
- return io.EOF
- }
-
- // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 .
- if err == bufio.ErrBufferFull {
- return &ErrSmallBuffer{
- error: fmt.Errorf("error when reading request headers: %s", errSmallBuffer),
- }
- }
-
- return fmt.Errorf("error when reading request headers: %s", err)
- }
- b = mustPeekBuffered(r)
- headersLen, errParse := h.parse(b)
- if errParse != nil {
- return headerError("request", err, errParse, b)
- }
- mustDiscard(r, headersLen)
- return nil
-}
-
-func bufferSnippet(b []byte) string {
- n := len(b)
- start := 200
- end := n - start
- if start >= end {
- start = n
- end = n
- }
- bStart, bEnd := b[:start], b[end:]
- if len(bEnd) == 0 {
- return fmt.Sprintf("%q", b)
- }
- return fmt.Sprintf("%q...%q", bStart, bEnd)
-}
-
-func isOnlyCRLF(b []byte) bool {
- for _, ch := range b {
- if ch != '\r' && ch != '\n' {
- return false
- }
- }
- return true
-}
-
-func init() {
- refreshServerDate()
- go func() {
- for {
- time.Sleep(time.Second)
- refreshServerDate()
- }
- }()
-}
-
-var serverDate atomic.Value
-
-func refreshServerDate() {
- b := AppendHTTPDate(nil, time.Now())
- serverDate.Store(b)
-}
-
-// Write writes response header to w.
-func (h *ResponseHeader) Write(w *bufio.Writer) error {
- _, err := w.Write(h.Header())
- return err
-}
-
-// WriteTo writes response header to w.
-//
-// WriteTo implements io.WriterTo interface.
-func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(h.Header())
- return int64(n), err
-}
-
-// Header returns response header representation.
-//
-// The returned value is valid until the next call to ResponseHeader methods.
-func (h *ResponseHeader) Header() []byte {
- h.bufKV.value = h.AppendBytes(h.bufKV.value[:0])
- return h.bufKV.value
-}
-
-// String returns response header representation.
-func (h *ResponseHeader) String() string {
- return string(h.Header())
-}
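
`Header` and `String` serialize the full status line and header block via `AppendBytes`, filling in `Server` and `Date` automatically when unset; for example:

```go
var h fasthttp.ResponseHeader
h.SetStatusCode(fasthttp.StatusNotFound)
h.SetContentType("text/plain; charset=utf-8")
h.Set("X-Request-Id", "42")

// Prints the status line plus headers, including the automatic
// Server and Date lines appended by AppendBytes.
fmt.Print(h.String())
```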
-
-// AppendBytes appends response header representation to dst and returns
-// the extended dst.
-func (h *ResponseHeader) AppendBytes(dst []byte) []byte {
- statusCode := h.StatusCode()
- if statusCode < 0 {
- statusCode = StatusOK
- }
- dst = append(dst, statusLine(statusCode)...)
-
- server := h.Server()
- if len(server) == 0 {
- server = defaultServerName
- }
- dst = appendHeaderLine(dst, strServer, server)
- dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte))
-
- // Append Content-Type only for non-zero responses
- // or if it is explicitly set.
- // See https://github.com/valyala/fasthttp/issues/28 .
- if h.ContentLength() != 0 || len(h.contentType) > 0 {
- dst = appendHeaderLine(dst, strContentType, h.ContentType())
- }
-
- if len(h.contentLengthBytes) > 0 {
- dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes)
- }
-
- for i, n := 0, len(h.h); i < n; i++ {
- kv := &h.h[i]
- if !bytes.Equal(kv.key, strDate) {
- dst = appendHeaderLine(dst, kv.key, kv.value)
- }
- }
-
- n := len(h.cookies)
- if n > 0 {
- for i := 0; i < n; i++ {
- kv := &h.cookies[i]
- dst = appendHeaderLine(dst, strSetCookie, kv.value)
- }
- }
-
- if h.ConnectionClose() {
- dst = appendHeaderLine(dst, strConnection, strClose)
- }
-
- return append(dst, strCRLF...)
-}
-
-// Write writes request header to w.
-func (h *RequestHeader) Write(w *bufio.Writer) error {
- _, err := w.Write(h.Header())
- return err
-}
-
-// WriteTo writes request header to w.
-//
-// WriteTo implements io.WriterTo interface.
-func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(h.Header())
- return int64(n), err
-}
-
-// Header returns request header representation.
-//
-// The returned representation is valid until the next call to RequestHeader methods.
-func (h *RequestHeader) Header() []byte {
- h.bufKV.value = h.AppendBytes(h.bufKV.value[:0])
- return h.bufKV.value
-}
-
-// String returns request header representation.
-func (h *RequestHeader) String() string {
- return string(h.Header())
-}
-
-// AppendBytes appends request header representation to dst and returns
-// the extended dst.
-func (h *RequestHeader) AppendBytes(dst []byte) []byte {
- // there is no need in h.parseRawHeaders() here - raw headers are specially handled below.
- dst = append(dst, h.Method()...)
- dst = append(dst, ' ')
- dst = append(dst, h.RequestURI()...)
- dst = append(dst, ' ')
- dst = append(dst, strHTTP11...)
- dst = append(dst, strCRLF...)
-
- if !h.rawHeadersParsed && len(h.rawHeaders) > 0 {
- return append(dst, h.rawHeaders...)
- }
-
- userAgent := h.UserAgent()
- if len(userAgent) == 0 {
- userAgent = defaultUserAgent
- }
- dst = appendHeaderLine(dst, strUserAgent, userAgent)
-
- host := h.Host()
- if len(host) > 0 {
- dst = appendHeaderLine(dst, strHost, host)
- }
-
- contentType := h.ContentType()
- if !h.noBody() {
- if len(contentType) == 0 {
- contentType = strPostArgsContentType
- }
- dst = appendHeaderLine(dst, strContentType, contentType)
-
- if len(h.contentLengthBytes) > 0 {
- dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes)
- }
- } else if len(contentType) > 0 {
- dst = appendHeaderLine(dst, strContentType, contentType)
- }
-
- for i, n := 0, len(h.h); i < n; i++ {
- kv := &h.h[i]
- dst = appendHeaderLine(dst, kv.key, kv.value)
- }
-
- // there is no need in h.collectCookies() here, since if cookies aren't collected yet,
- // they all are located in h.h.
- n := len(h.cookies)
- if n > 0 {
- dst = append(dst, strCookie...)
- dst = append(dst, strColonSpace...)
- dst = appendRequestCookieBytes(dst, h.cookies)
- dst = append(dst, strCRLF...)
- }
-
- if h.ConnectionClose() {
- dst = appendHeaderLine(dst, strConnection, strClose)
- }
-
- return append(dst, strCRLF...)
-}
-
-func appendHeaderLine(dst, key, value []byte) []byte {
- dst = append(dst, key...)
- dst = append(dst, strColonSpace...)
- dst = append(dst, value...)
- return append(dst, strCRLF...)
-}
-
-func (h *ResponseHeader) parse(buf []byte) (int, error) {
- m, err := h.parseFirstLine(buf)
- if err != nil {
- return 0, err
- }
- n, err := h.parseHeaders(buf[m:])
- if err != nil {
- return 0, err
- }
- return m + n, nil
-}
-
-func (h *RequestHeader) noBody() bool {
- return h.IsGet() || h.IsHead()
-}
-
-func (h *RequestHeader) parse(buf []byte) (int, error) {
- m, err := h.parseFirstLine(buf)
- if err != nil {
- return 0, err
- }
-
- var n int
- if !h.noBody() || h.noHTTP11 {
- n, err = h.parseHeaders(buf[m:])
- if err != nil {
- return 0, err
- }
- h.rawHeadersParsed = true
- } else {
- var rawHeaders []byte
- rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:])
- if err != nil {
- return 0, err
- }
- h.rawHeaders = rawHeaders
- }
- return m + n, nil
-}
-
-func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) {
- bNext := buf
- var b []byte
- var err error
- for len(b) == 0 {
- if b, bNext, err = nextLine(bNext); err != nil {
- return 0, err
- }
- }
-
- // parse protocol
- n := bytes.IndexByte(b, ' ')
- if n < 0 {
- return 0, fmt.Errorf("cannot find whitespace in the first line of response %q", buf)
- }
- h.noHTTP11 = !bytes.Equal(b[:n], strHTTP11)
- b = b[n+1:]
-
- // parse status code
- h.statusCode, n, err = parseUintBuf(b)
- if err != nil {
- return 0, fmt.Errorf("cannot parse response status code: %s. Response %q", err, buf)
- }
- if len(b) > n && b[n] != ' ' {
- return 0, fmt.Errorf("unexpected char at the end of status code. Response %q", buf)
- }
-
- return len(buf) - len(bNext), nil
-}
-
-func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) {
- bNext := buf
- var b []byte
- var err error
- for len(b) == 0 {
- if b, bNext, err = nextLine(bNext); err != nil {
- return 0, err
- }
- }
-
- // parse method
- n := bytes.IndexByte(b, ' ')
- if n <= 0 {
- return 0, fmt.Errorf("cannot find http request method in %q", buf)
- }
- h.method = append(h.method[:0], b[:n]...)
- b = b[n+1:]
-
- // parse requestURI
- n = bytes.LastIndexByte(b, ' ')
- if n < 0 {
- h.noHTTP11 = true
- n = len(b)
- } else if n == 0 {
- return 0, fmt.Errorf("requestURI cannot be empty in %q", buf)
- } else if !bytes.Equal(b[n+1:], strHTTP11) {
- h.noHTTP11 = true
- }
- h.requestURI = append(h.requestURI[:0], b[:n]...)
-
- return len(buf) - len(bNext), nil
-}
-
-func peekRawHeader(buf, key []byte) []byte {
- n := bytes.Index(buf, key)
- if n < 0 {
- return nil
- }
- if n > 0 && buf[n-1] != '\n' {
- return nil
- }
- n += len(key)
- if n >= len(buf) {
- return nil
- }
- if buf[n] != ':' {
- return nil
- }
- n++
- if buf[n] != ' ' {
- return nil
- }
- n++
- buf = buf[n:]
- n = bytes.IndexByte(buf, '\n')
- if n < 0 {
- return nil
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- return buf[:n]
-}
-
-func readRawHeaders(dst, buf []byte) ([]byte, int, error) {
- n := bytes.IndexByte(buf, '\n')
- if n < 0 {
- return nil, 0, errNeedMore
- }
- if (n == 1 && buf[0] == '\r') || n == 0 {
- // empty headers
- return dst, n + 1, nil
- }
-
- n++
- b := buf
- m := n
- for {
- b = b[m:]
- m = bytes.IndexByte(b, '\n')
- if m < 0 {
- return nil, 0, errNeedMore
- }
- m++
- n += m
- if (m == 2 && b[0] == '\r') || m == 1 {
- dst = append(dst, buf[:n]...)
- return dst, n, nil
- }
- }
-}
-
-func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) {
- // 'identity' content-length by default
- h.contentLength = -2
-
- var s headerScanner
- s.b = buf
- var err error
- var kv *argsKV
- for s.next() {
- switch string(s.key) {
- case "Content-Type":
- h.contentType = append(h.contentType[:0], s.value...)
- case "Server":
- h.server = append(h.server[:0], s.value...)
- case "Content-Length":
- if h.contentLength != -1 {
- if h.contentLength, err = parseContentLength(s.value); err != nil {
- h.contentLength = -2
- } else {
- h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
- }
- }
- case "Transfer-Encoding":
- if !bytes.Equal(s.value, strIdentity) {
- h.contentLength = -1
- h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
- }
- case "Set-Cookie":
- h.cookies, kv = allocArg(h.cookies)
- kv.key = getCookieKey(kv.key, s.value)
- kv.value = append(kv.value[:0], s.value...)
- case "Connection":
- if bytes.Equal(s.value, strClose) {
- h.connectionClose = true
- } else {
- h.connectionClose = false
- h.h = appendArgBytes(h.h, s.key, s.value)
- }
- default:
- h.h = appendArgBytes(h.h, s.key, s.value)
- }
- }
- if s.err != nil {
- h.connectionClose = true
- return 0, s.err
- }
-
- if h.contentLength < 0 {
- h.contentLengthBytes = h.contentLengthBytes[:0]
- }
- if h.contentLength == -2 && !h.ConnectionUpgrade() && !h.mustSkipContentLength() {
- h.h = setArgBytes(h.h, strTransferEncoding, strIdentity)
- h.connectionClose = true
- }
- if h.noHTTP11 && !h.connectionClose {
- // close connection for non-http/1.1 response unless 'Connection: keep-alive' is set.
- v := peekArgBytes(h.h, strConnection)
- h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase)
- }
-
- return len(buf) - len(s.b), nil
-}
-
-func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
- h.contentLength = -2
-
- var s headerScanner
- s.b = buf
- var err error
- for s.next() {
- switch string(s.key) {
- case "Host":
- h.host = append(h.host[:0], s.value...)
- case "User-Agent":
- h.userAgent = append(h.userAgent[:0], s.value...)
- case "Content-Type":
- h.contentType = append(h.contentType[:0], s.value...)
- case "Content-Length":
- if h.contentLength != -1 {
- if h.contentLength, err = parseContentLength(s.value); err != nil {
- h.contentLength = -2
- } else {
- h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
- }
- }
- case "Transfer-Encoding":
- if !bytes.Equal(s.value, strIdentity) {
- h.contentLength = -1
- h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
- }
- case "Connection":
- if bytes.Equal(s.value, strClose) {
- h.connectionClose = true
- } else {
- h.connectionClose = false
- h.h = appendArgBytes(h.h, s.key, s.value)
- }
- default:
- h.h = appendArgBytes(h.h, s.key, s.value)
- }
- }
- if s.err != nil {
- h.connectionClose = true
- return 0, s.err
- }
-
- if h.contentLength < 0 {
- h.contentLengthBytes = h.contentLengthBytes[:0]
- }
- if h.noBody() {
- h.contentLength = 0
- h.contentLengthBytes = h.contentLengthBytes[:0]
- }
- if h.noHTTP11 && !h.connectionClose {
- // close connection for non-http/1.1 request unless 'Connection: keep-alive' is set.
- v := peekArgBytes(h.h, strConnection)
- h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase)
- }
-
- return len(buf) - len(s.b), nil
-}
-
-func (h *RequestHeader) parseRawHeaders() {
- if h.rawHeadersParsed {
- return
- }
- h.rawHeadersParsed = true
- if len(h.rawHeaders) == 0 {
- return
- }
- h.parseHeaders(h.rawHeaders)
-}
-
-func (h *RequestHeader) collectCookies() {
- if h.cookiesCollected {
- return
- }
-
- for i, n := 0, len(h.h); i < n; i++ {
- kv := &h.h[i]
- if bytes.Equal(kv.key, strCookie) {
- h.cookies = parseRequestCookies(h.cookies, kv.value)
- tmp := *kv
- copy(h.h[i:], h.h[i+1:])
- n--
- i--
- h.h[n] = tmp
- h.h = h.h[:n]
- }
- }
- h.cookiesCollected = true
-}
-
-func parseContentLength(b []byte) (int, error) {
- v, n, err := parseUintBuf(b)
- if err != nil {
- return -1, err
- }
- if n != len(b) {
- return -1, fmt.Errorf("non-numeric chars at the end of Content-Length")
- }
- return v, nil
-}
-
-type headerScanner struct {
- b []byte
- key []byte
- value []byte
- err error
-}
-
-func (s *headerScanner) next() bool {
- bLen := len(s.b)
- if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' {
- s.b = s.b[2:]
- return false
- }
- if bLen >= 1 && s.b[0] == '\n' {
- s.b = s.b[1:]
- return false
- }
- n := bytes.IndexByte(s.b, ':')
- if n < 0 {
- s.err = errNeedMore
- return false
- }
- s.key = s.b[:n]
- normalizeHeaderKey(s.key)
- n++
- for len(s.b) > n && s.b[n] == ' ' {
- n++
- }
- s.b = s.b[n:]
- n = bytes.IndexByte(s.b, '\n')
- if n < 0 {
- s.err = errNeedMore
- return false
- }
- s.value = s.b[:n]
- s.b = s.b[n+1:]
-
- if n > 0 && s.value[n-1] == '\r' {
- n--
- }
- for n > 0 && s.value[n-1] == ' ' {
- n--
- }
- s.value = s.value[:n]
- return true
-}
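
`headerScanner` is unexported, so the following sketch assumes in-package access (e.g. a `_test.go` file in the same package); it shows the scanning contract: keys are normalized in place, and `errNeedMore` is reported when the terminating empty line hasn't arrived yet.

```go
var s headerScanner
s.b = []byte("Host: example.com\r\ncontent-TYPE: text/html\r\n\r\n")
for s.next() {
	// Keys are normalized in place: "content-TYPE" -> "Content-Type".
	fmt.Printf("%s: %s\n", s.key, s.value)
}
if s.err != nil {
	// errNeedMore: the header block is incomplete.
	fmt.Println("incomplete header block:", s.err)
}
```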
-
-type headerValueScanner struct {
- b []byte
- value []byte
-}
-
-func (s *headerValueScanner) next() bool {
- b := s.b
- if len(b) == 0 {
- return false
- }
- n := bytes.IndexByte(b, ',')
- if n < 0 {
- s.value = stripSpace(b)
- s.b = b[len(b):]
- return true
- }
- s.value = stripSpace(b[:n])
- s.b = b[n+1:]
- return true
-}
-
-func stripSpace(b []byte) []byte {
- for len(b) > 0 && b[0] == ' ' {
- b = b[1:]
- }
- for len(b) > 0 && b[len(b)-1] == ' ' {
- b = b[:len(b)-1]
- }
- return b
-}
-
-func hasHeaderValue(s, value []byte) bool {
- var vs headerValueScanner
- vs.b = s
- for vs.next() {
- if bytes.Equal(vs.value, value) {
- return true
- }
- }
- return false
-}
-
-func nextLine(b []byte) ([]byte, []byte, error) {
- nNext := bytes.IndexByte(b, '\n')
- if nNext < 0 {
- return nil, nil, errNeedMore
- }
- n := nNext
- if n > 0 && b[n-1] == '\r' {
- n--
- }
- return b[:n], b[nNext+1:], nil
-}
-
-func initHeaderKV(kv *argsKV, key, value string) {
- kv.key = getHeaderKeyBytes(kv, key)
- kv.value = append(kv.value[:0], value...)
-}
-
-func getHeaderKeyBytes(kv *argsKV, key string) []byte {
- kv.key = append(kv.key[:0], key...)
- normalizeHeaderKey(kv.key)
- return kv.key
-}
-
-func normalizeHeaderKey(b []byte) {
- n := len(b)
- if n == 0 {
- return
- }
-
- b[0] = toUpperTable[b[0]]
- for i := 1; i < n; i++ {
- p := &b[i]
- if *p == '-' {
- i++
- if i < n {
- b[i] = toUpperTable[b[i]]
- }
- continue
- }
- *p = toLowerTable[*p]
- }
-}
-
-// AppendNormalizedHeaderKey appends normalized header key (name) to dst
-// and returns the resulting dst.
-//
-// Normalized header key starts with uppercase letter. The first letters
-// after dashes are also uppercased. All the other letters are lowercased.
-// Examples:
-//
-// - coNTENT-TYPe -> Content-Type
-// - HOST -> Host
-// - foo-bar-baz -> Foo-Bar-Baz
-func AppendNormalizedHeaderKey(dst []byte, key string) []byte {
- dst = append(dst, key...)
- normalizeHeaderKey(dst[len(dst)-len(key):])
- return dst
-}
-
-// AppendNormalizedHeaderKeyBytes appends normalized header key (name) to dst
-// and returns the resulting dst.
-//
-// Normalized header key starts with uppercase letter. The first letters
-// after dashes are also uppercased. All the other letters are lowercased.
-// Examples:
-//
-// - coNTENT-TYPe -> Content-Type
-// - HOST -> Host
-// - foo-bar-baz -> Foo-Bar-Baz
-func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte {
- return AppendNormalizedHeaderKey(dst, b2s(key))
-}
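
A quick illustration of the normalization rules:

```go
dst := fasthttp.AppendNormalizedHeaderKey(nil, "coNTENT-TYPe")
fmt.Println(string(dst)) // Content-Type

dst = fasthttp.AppendNormalizedHeaderKey(dst[:0], "x-fORWARDED-fOR")
fmt.Println(string(dst)) // X-Forwarded-For
```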
-
-var (
- errNeedMore = errors.New("need more data: cannot find trailing lf")
- errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize")
-)
-
-// ErrSmallBuffer is returned when the provided buffer size is too small
-// for reading request and/or response headers.
-//
-// Increasing the ReadBufferSize value on the Server or clients should
-// reduce the number of such errors.
-type ErrSmallBuffer struct {
- error
-}
-
-func mustPeekBuffered(r *bufio.Reader) []byte {
- buf, err := r.Peek(r.Buffered())
- if len(buf) == 0 || err != nil {
- panic(fmt.Sprintf("bufio.Reader.Peek() returned unexpected data (%q, %v)", buf, err))
- }
- return buf
-}
-
-func mustDiscard(r *bufio.Reader, n int) {
- if _, err := r.Discard(n); err != nil {
- panic(fmt.Sprintf("bufio.Reader.Discard(%d) failed: %s", n, err))
- }
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/http.go b/vendor/github.com/VictoriaMetrics/fasthttp/http.go
deleted file mode 100644
index 795f8fbde..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/http.go
+++ /dev/null
@@ -1,1717 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "mime/multipart"
- "os"
- "sync"
-
- "github.com/valyala/bytebufferpool"
-)
-
-// Request represents HTTP request.
-//
-// Copying Request instances is forbidden. Create new instances
-// and use CopyTo instead.
-//
-// Request instance MUST NOT be used from concurrently running goroutines.
-type Request struct {
- noCopy noCopy
-
- // Request header
- //
- // Copying Header by value is forbidden. Use pointer to Header instead.
- Header RequestHeader
-
- uri URI
- postArgs Args
-
- bodyStream io.Reader
- w requestBodyWriter
- body *bytebufferpool.ByteBuffer
-
- multipartForm *multipart.Form
- multipartFormBoundary string
-
- // Group bool members in order to reduce Request object size.
- parsedURI bool
- parsedPostArgs bool
-
- keepBodyBuffer bool
-
- isTLS bool
-}
-
-// Response represents HTTP response.
-//
-// Copying Response instances is forbidden. Create new instances
-// and use CopyTo instead.
-//
-// Response instance MUST NOT be used from concurrently running goroutines.
-type Response struct {
- noCopy noCopy
-
- // Response header
- //
- // Copying Header by value is forbidden. Use pointer to Header instead.
- Header ResponseHeader
-
- bodyStream io.Reader
- w responseBodyWriter
- body *bytebufferpool.ByteBuffer
-
- // Response.Read() skips reading body if set to true.
- // Use it for reading HEAD responses.
- //
- // Response.Write() skips writing body if set to true.
- // Use it for writing HEAD responses.
- SkipBody bool
-
- keepBodyBuffer bool
-}
-
-// SetHost sets host for the request.
-func (req *Request) SetHost(host string) {
- req.URI().SetHost(host)
-}
-
-// SetHostBytes sets host for the request.
-func (req *Request) SetHostBytes(host []byte) {
- req.URI().SetHostBytes(host)
-}
-
-// Host returns the host for the given request.
-func (req *Request) Host() []byte {
- return req.URI().Host()
-}
-
-// SetRequestURI sets RequestURI.
-func (req *Request) SetRequestURI(requestURI string) {
- req.Header.SetRequestURI(requestURI)
- req.parsedURI = false
-}
-
-// SetRequestURIBytes sets RequestURI.
-func (req *Request) SetRequestURIBytes(requestURI []byte) {
- req.Header.SetRequestURIBytes(requestURI)
- req.parsedURI = false
-}
-
-// RequestURI returns request's URI.
-func (req *Request) RequestURI() []byte {
- if req.parsedURI {
- requestURI := req.uri.RequestURI()
- req.SetRequestURIBytes(requestURI)
- }
- return req.Header.RequestURI()
-}
-
-// StatusCode returns response status code.
-func (resp *Response) StatusCode() int {
- return resp.Header.StatusCode()
-}
-
-// SetStatusCode sets response status code.
-func (resp *Response) SetStatusCode(statusCode int) {
- resp.Header.SetStatusCode(statusCode)
-}
-
-// ConnectionClose returns true if 'Connection: close' header is set.
-func (resp *Response) ConnectionClose() bool {
- return resp.Header.ConnectionClose()
-}
-
-// SetConnectionClose sets 'Connection: close' header.
-func (resp *Response) SetConnectionClose() {
- resp.Header.SetConnectionClose()
-}
-
-// ConnectionClose returns true if 'Connection: close' header is set.
-func (req *Request) ConnectionClose() bool {
- return req.Header.ConnectionClose()
-}
-
-// SetConnectionClose sets 'Connection: close' header.
-func (req *Request) SetConnectionClose() {
- req.Header.SetConnectionClose()
-}
-
-// SendFile registers file on the given path to be used as response body
-// when Write is called.
-//
-// Note that SendFile doesn't set Content-Type, so set it yourself
-// with Header.SetContentType.
-func (resp *Response) SendFile(path string) error {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- fileInfo, err := f.Stat()
- if err != nil {
- f.Close()
- return err
- }
- size64 := fileInfo.Size()
- size := int(size64)
- if int64(size) != size64 {
- size = -1
- }
-
- resp.Header.SetLastModified(fileInfo.ModTime())
- resp.SetBodyStream(f, size)
- return nil
-}
-
-// SetBodyStream sets request body stream and, optionally, body size.
-//
-// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
-// before returning io.EOF.
-//
-// If bodySize < 0, then bodyStream is read until io.EOF.
-//
-// bodyStream.Close() is called after finishing reading all body data
-// if it implements io.Closer.
-//
-// Note that GET and HEAD requests cannot have body.
-//
-// See also SetBodyStreamWriter.
-func (req *Request) SetBodyStream(bodyStream io.Reader, bodySize int) {
- req.ResetBody()
- req.bodyStream = bodyStream
- req.Header.SetContentLength(bodySize)
-}
-
-// SetBodyStream sets response body stream and, optionally, body size.
-//
-// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
-// before returning io.EOF.
-//
-// If bodySize < 0, then bodyStream is read until io.EOF.
-//
-// bodyStream.Close() is called after finishing reading all body data
-// if it implements io.Closer.
-//
-// See also SetBodyStreamWriter.
-func (resp *Response) SetBodyStream(bodyStream io.Reader, bodySize int) {
- resp.ResetBody()
- resp.bodyStream = bodyStream
- resp.Header.SetContentLength(bodySize)
-}
-
-// IsBodyStream returns true if body is set via SetBodyStream*
-func (req *Request) IsBodyStream() bool {
- return req.bodyStream != nil
-}
-
-// IsBodyStream returns true if body is set via SetBodyStream*
-func (resp *Response) IsBodyStream() bool {
- return resp.bodyStream != nil
-}
-
-// SetBodyStreamWriter registers the given sw for populating request body.
-//
-// This function may be used in the following cases:
-//
-// - if request body is too big (more than 10MB).
-// - if request body is streamed from slow external sources.
-// - if request body must be streamed to the server in chunks
-// (aka `http client push` or `chunked transfer-encoding`).
-//
-// Note that GET and HEAD requests cannot have body.
-//
-// See also SetBodyStream.
-func (req *Request) SetBodyStreamWriter(sw StreamWriter) {
- sr := NewStreamReader(sw)
- req.SetBodyStream(sr, -1)
-}
-
-// SetBodyStreamWriter registers the given sw for populating response body.
-//
-// This function may be used in the following cases:
-//
-// - if response body is too big (more than 10MB).
-// - if response body is streamed from slow external sources.
-// - if response body must be streamed to the client in chunks
-// (aka `http server push` or `chunked transfer-encoding`).
-//
-// See also SetBodyStream.
-func (resp *Response) SetBodyStreamWriter(sw StreamWriter) {
- sr := NewStreamReader(sw)
- resp.SetBodyStream(sr, -1)
-}
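
A minimal sketch of chunked streaming with `SetBodyStreamWriter`; flushing inside the writer pushes each chunk to the peer immediately:

```go
var resp fasthttp.Response
resp.SetBodyStreamWriter(func(w *bufio.Writer) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "chunk %d\n", i)
		// Flush sends the chunk immediately instead of buffering it.
		if err := w.Flush(); err != nil {
			return
		}
	}
})
```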
-
-// BodyWriter returns writer for populating response body.
-//
-// If used inside RequestHandler, the returned writer must not be used
-// after returning from RequestHandler. Use RequestCtx.Write
-// or SetBodyStreamWriter in this case.
-func (resp *Response) BodyWriter() io.Writer {
- resp.w.r = resp
- return &resp.w
-}
-
-// BodyWriter returns writer for populating request body.
-func (req *Request) BodyWriter() io.Writer {
- req.w.r = req
- return &req.w
-}
-
-type responseBodyWriter struct {
- r *Response
-}
-
-func (w *responseBodyWriter) Write(p []byte) (int, error) {
- w.r.AppendBody(p)
- return len(p), nil
-}
-
-type requestBodyWriter struct {
- r *Request
-}
-
-func (w *requestBodyWriter) Write(p []byte) (int, error) {
- w.r.AppendBody(p)
- return len(p), nil
-}
-
-// Body returns response body.
-//
-// The returned body is valid until the response is modified.
-func (resp *Response) Body() []byte {
- if resp.bodyStream != nil {
- bodyBuf := resp.bodyBuffer()
- bodyBuf.Reset()
- _, err := copyZeroAlloc(bodyBuf, resp.bodyStream)
- resp.closeBodyStream()
- if err != nil {
- bodyBuf.SetString(err.Error())
- }
- }
- return resp.bodyBytes()
-}
-
-func (resp *Response) bodyBytes() []byte {
- if resp.body == nil {
- return nil
- }
- return resp.body.B
-}
-
-func (req *Request) bodyBytes() []byte {
- if req.body == nil {
- return nil
- }
- return req.body.B
-}
-
-func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer {
- if resp.body == nil {
- resp.body = responseBodyPool.Get()
- }
- return resp.body
-}
-
-func (req *Request) bodyBuffer() *bytebufferpool.ByteBuffer {
- if req.body == nil {
- req.body = requestBodyPool.Get()
- }
- return req.body
-}
-
-var (
- responseBodyPool bytebufferpool.Pool
- requestBodyPool bytebufferpool.Pool
-)
-
-// BodyGunzip returns un-gzipped body data.
-//
-// This method may be used if the request header contains
-// 'Content-Encoding: gzip' for reading un-gzipped body.
-// Use Body for reading gzipped request body.
-func (req *Request) BodyGunzip() ([]byte, error) {
- return gunzipData(req.Body())
-}
-
-// BodyGunzip returns un-gzipped body data.
-//
-// This method may be used if the response header contains
-// 'Content-Encoding: gzip' for reading un-gzipped body.
-// Use Body for reading gzipped response body.
-func (resp *Response) BodyGunzip() ([]byte, error) {
- return gunzipData(resp.Body())
-}
-
-func gunzipData(p []byte) ([]byte, error) {
- var bb ByteBuffer
- _, err := WriteGunzip(&bb, p)
- if err != nil {
- return nil, err
- }
- return bb.B, nil
-}
-
-// BodyInflate returns inflated body data.
-//
-// This method may be used if the response header contains
-// 'Content-Encoding: deflate' for reading inflated request body.
-// Use Body for reading deflated request body.
-func (req *Request) BodyInflate() ([]byte, error) {
- return inflateData(req.Body())
-}
-
-// BodyInflate returns inflated body data.
-//
-// This method may be used if the response header contains
-// 'Content-Encoding: deflate' for reading inflated response body.
-// Use Body for reading deflated response body.
-func (resp *Response) BodyInflate() ([]byte, error) {
- return inflateData(resp.Body())
-}
-
-func inflateData(p []byte) ([]byte, error) {
- var bb ByteBuffer
- _, err := WriteInflate(&bb, p)
- if err != nil {
- return nil, err
- }
- return bb.B, nil
-}
-
-// BodyWriteTo writes request body to w.
-func (req *Request) BodyWriteTo(w io.Writer) error {
- if req.bodyStream != nil {
- _, err := copyZeroAlloc(w, req.bodyStream)
- req.closeBodyStream()
- return err
- }
- if req.onlyMultipartForm() {
- return WriteMultipartForm(w, req.multipartForm, req.multipartFormBoundary)
- }
- _, err := w.Write(req.bodyBytes())
- return err
-}
-
-// BodyWriteTo writes response body to w.
-func (resp *Response) BodyWriteTo(w io.Writer) error {
- if resp.bodyStream != nil {
- _, err := copyZeroAlloc(w, resp.bodyStream)
- resp.closeBodyStream()
- return err
- }
- _, err := w.Write(resp.bodyBytes())
- return err
-}
-
-// AppendBody appends p to response body.
-//
-// It is safe re-using p after the function returns.
-func (resp *Response) AppendBody(p []byte) {
- resp.AppendBodyString(b2s(p))
-}
-
-// AppendBodyString appends s to response body.
-func (resp *Response) AppendBodyString(s string) {
- resp.closeBodyStream()
- resp.bodyBuffer().WriteString(s)
-}
-
-// SetBody sets response body.
-//
-// It is safe re-using body argument after the function returns.
-func (resp *Response) SetBody(body []byte) {
- resp.SetBodyString(b2s(body))
-}
-
-// SetBodyString sets response body.
-func (resp *Response) SetBodyString(body string) {
- resp.closeBodyStream()
- bodyBuf := resp.bodyBuffer()
- bodyBuf.Reset()
- bodyBuf.WriteString(body)
-}
-
-// ResetBody resets response body.
-func (resp *Response) ResetBody() {
- resp.closeBodyStream()
- if resp.body != nil {
- if resp.keepBodyBuffer {
- resp.body.Reset()
- } else {
- responseBodyPool.Put(resp.body)
- resp.body = nil
- }
- }
-}
-
-// ReleaseBody retires the response body if it is greater than "size" bytes.
-//
-// This permits GC to reclaim the large buffer. If used, must be before
-// ReleaseResponse.
-//
-// Use this method only if you really understand how it works.
-// The majority of workloads don't need this method.
-func (resp *Response) ReleaseBody(size int) {
- if cap(resp.body.B) > size {
- resp.closeBodyStream()
- resp.body = nil
- }
-}
-
-// ReleaseBody retires the request body if it is greater than "size" bytes.
-//
-// This permits GC to reclaim the large buffer. If used, must be before
-// ReleaseRequest.
-//
-// Use this method only if you really understand how it works.
-// The majority of workloads don't need this method.
-func (req *Request) ReleaseBody(size int) {
- if cap(req.body.B) > size {
- req.closeBodyStream()
- req.body = nil
- }
-}
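
For example, a client that occasionally receives multi-MB responses can cap what the body pool retains. A sketch, assuming the pooled `AcquireResponse`/`ReleaseResponse` helpers referenced above:

```go
resp := fasthttp.AcquireResponse()
resp.SetBodyString(strings.Repeat("x", 4*1024*1024)) // stand-in for a huge body

// Drop body buffers larger than 1MB so ReleaseResponse doesn't
// return them to the pool, letting GC reclaim the memory.
resp.ReleaseBody(1024 * 1024)
fasthttp.ReleaseResponse(resp)
```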
-
-// SwapBody swaps response body with the given body and returns
-// the previous response body.
-//
-// It is forbidden to use the body passed to SwapBody after
-// the function returns.
-func (resp *Response) SwapBody(body []byte) []byte {
- bb := resp.bodyBuffer()
-
- if resp.bodyStream != nil {
- bb.Reset()
- _, err := copyZeroAlloc(bb, resp.bodyStream)
- resp.closeBodyStream()
- if err != nil {
- bb.Reset()
- bb.SetString(err.Error())
- }
- }
-
- oldBody := bb.B
- bb.B = body
- return oldBody
-}
-
-// SwapBody swaps request body with the given body and returns
-// the previous request body.
-//
-// It is forbidden to use the body passed to SwapBody after
-// the function returns.
-func (req *Request) SwapBody(body []byte) []byte {
- bb := req.bodyBuffer()
-
- if req.bodyStream != nil {
- bb.Reset()
- _, err := copyZeroAlloc(bb, req.bodyStream)
- req.closeBodyStream()
- if err != nil {
- bb.Reset()
- bb.SetString(err.Error())
- }
- }
-
- oldBody := bb.B
- bb.B = body
- return oldBody
-}
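
`SwapBody` enables zero-copy handoff of a prepared buffer; the previous buffer is returned so the caller can recycle it:

```go
var resp fasthttp.Response
resp.SetBodyString("old payload")

// Hand over a pre-built buffer without copying. The passed slice
// must not be used afterwards; the old buffer is returned instead.
old := resp.SwapBody([]byte("new payload"))
fmt.Printf("old=%q new=%q\n", old, resp.Body())
```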
-
-// Body returns request body.
-//
-// The returned body is valid until the request is modified.
-func (req *Request) Body() []byte {
- if req.bodyStream != nil {
- bodyBuf := req.bodyBuffer()
- bodyBuf.Reset()
- _, err := copyZeroAlloc(bodyBuf, req.bodyStream)
- req.closeBodyStream()
- if err != nil {
- bodyBuf.SetString(err.Error())
- }
- } else if req.onlyMultipartForm() {
- body, err := marshalMultipartForm(req.multipartForm, req.multipartFormBoundary)
- if err != nil {
- return []byte(err.Error())
- }
- return body
- }
- return req.bodyBytes()
-}
-
-// AppendBody appends p to request body.
-//
-// It is safe re-using p after the function returns.
-func (req *Request) AppendBody(p []byte) {
- req.AppendBodyString(b2s(p))
-}
-
-// AppendBodyString appends s to request body.
-func (req *Request) AppendBodyString(s string) {
- req.RemoveMultipartFormFiles()
- req.closeBodyStream()
- req.bodyBuffer().WriteString(s)
-}
-
-// SetBody sets request body.
-//
-// It is safe re-using body argument after the function returns.
-func (req *Request) SetBody(body []byte) {
- req.SetBodyString(b2s(body))
-}
-
-// SetBodyString sets request body.
-func (req *Request) SetBodyString(body string) {
- req.RemoveMultipartFormFiles()
- req.closeBodyStream()
- req.bodyBuffer().SetString(body)
-}
-
-// ResetBody resets request body.
-func (req *Request) ResetBody() {
- req.RemoveMultipartFormFiles()
- req.closeBodyStream()
- if req.body != nil {
- if req.keepBodyBuffer {
- req.body.Reset()
- } else {
- requestBodyPool.Put(req.body)
- req.body = nil
- }
- }
-}
-
-// CopyTo copies req contents to dst except for the body stream.
-func (req *Request) CopyTo(dst *Request) {
- req.copyToSkipBody(dst)
- if req.body != nil {
- dst.bodyBuffer().Set(req.body.B)
- } else if dst.body != nil {
- dst.body.Reset()
- }
-}
-
-func (req *Request) copyToSkipBody(dst *Request) {
- dst.Reset()
- req.Header.CopyTo(&dst.Header)
-
- req.uri.CopyTo(&dst.uri)
- dst.parsedURI = req.parsedURI
-
- req.postArgs.CopyTo(&dst.postArgs)
- dst.parsedPostArgs = req.parsedPostArgs
- dst.isTLS = req.isTLS
-
- // do not copy multipartForm - it will be automatically
- // re-created on the first call to MultipartForm.
-}
-
-// CopyTo copies resp contents to dst except for the body stream.
-func (resp *Response) CopyTo(dst *Response) {
- resp.copyToSkipBody(dst)
- if resp.body != nil {
- dst.bodyBuffer().Set(resp.body.B)
- } else if dst.body != nil {
- dst.body.Reset()
- }
-}
-
-func (resp *Response) copyToSkipBody(dst *Response) {
- dst.Reset()
- resp.Header.CopyTo(&dst.Header)
- dst.SkipBody = resp.SkipBody
-}
-
-func swapRequestBody(a, b *Request) {
- a.body, b.body = b.body, a.body
- a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
-}
-
-func swapResponseBody(a, b *Response) {
- a.body, b.body = b.body, a.body
- a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
-}
-
-// URI returns request URI
-func (req *Request) URI() *URI {
- req.parseURI()
- return &req.uri
-}
-
-func (req *Request) parseURI() {
- if req.parsedURI {
- return
- }
- req.parsedURI = true
-
- req.uri.parseQuick(req.Header.RequestURI(), &req.Header, req.isTLS)
-}
-
-// PostArgs returns POST arguments.
-func (req *Request) PostArgs() *Args {
- req.parsePostArgs()
- return &req.postArgs
-}
-
-func (req *Request) parsePostArgs() {
- if req.parsedPostArgs {
- return
- }
- req.parsedPostArgs = true
-
- if !bytes.HasPrefix(req.Header.ContentType(), strPostArgsContentType) {
- return
- }
- req.postArgs.ParseBytes(req.bodyBytes())
-}
-
-// ErrNoMultipartForm means that the request's Content-Type
-// isn't 'multipart/form-data'.
-var ErrNoMultipartForm = errors.New("request has no multipart/form-data Content-Type")
-
-// MultipartForm returns the request's multipart form.
-//
-// Returns ErrNoMultipartForm if request's Content-Type
-// isn't 'multipart/form-data'.
-//
-// RemoveMultipartFormFiles must be called after returned multipart form
-// is processed.
-func (req *Request) MultipartForm() (*multipart.Form, error) {
- if req.multipartForm != nil {
- return req.multipartForm, nil
- }
-
- req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
- if len(req.multipartFormBoundary) == 0 {
- return nil, ErrNoMultipartForm
- }
-
- ce := req.Header.peek(strContentEncoding)
- body := req.bodyBytes()
- if bytes.Equal(ce, strGzip) {
- // Do not care about memory usage here.
- var err error
- if body, err = AppendGunzipBytes(nil, body); err != nil {
- return nil, fmt.Errorf("cannot gunzip request body: %s", err)
- }
- } else if len(ce) > 0 {
- return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce)
- }
-
- f, err := readMultipartForm(bytes.NewReader(body), req.multipartFormBoundary, len(body), len(body))
- if err != nil {
- return nil, err
- }
- req.multipartForm = f
- return f, nil
-}
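
Typical usage pairs `MultipartForm` with a deferred `RemoveMultipartFormFiles` so temporary upload files are always cleaned up; a hypothetical handler sketch:

```go
func handleUpload(req *fasthttp.Request) error {
	form, err := req.MultipartForm()
	if err != nil {
		// ErrNoMultipartForm for requests without a multipart Content-Type.
		return err
	}
	// Delete temporary files once the form has been processed.
	defer req.RemoveMultipartFormFiles()

	for name, values := range form.Value {
		fmt.Printf("field %s = %v\n", name, values)
	}
	return nil
}
```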
-
-func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) {
- var buf ByteBuffer
- if err := WriteMultipartForm(&buf, f, boundary); err != nil {
- return nil, err
- }
- return buf.B, nil
-}
-
-// WriteMultipartForm writes the given multipart form f with the given
-// boundary to w.
-func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error {
- // Do not care about memory allocations here, since multipart
- // form processing is slooow.
- if len(boundary) == 0 {
- panic("BUG: form boundary cannot be empty")
- }
-
- mw := multipart.NewWriter(w)
- if err := mw.SetBoundary(boundary); err != nil {
- return fmt.Errorf("cannot use form boundary %q: %s", boundary, err)
- }
-
- // marshal values
- for k, vv := range f.Value {
- for _, v := range vv {
- if err := mw.WriteField(k, v); err != nil {
- return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err)
- }
- }
- }
-
- // marshal files
- for k, fvv := range f.File {
- for _, fv := range fvv {
- vw, err := mw.CreateFormFile(k, fv.Filename)
- if err != nil {
- return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err)
- }
- fh, err := fv.Open()
- if err != nil {
- return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err)
- }
- if _, err = copyZeroAlloc(vw, fh); err != nil {
- return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err)
- }
- if err = fh.Close(); err != nil {
- return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err)
- }
- }
- }
-
- if err := mw.Close(); err != nil {
- return fmt.Errorf("error when closing multipart form writer: %s", err)
- }
-
- return nil
-}
-
-func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize int) (*multipart.Form, error) {
- // Do not care about memory allocations here, since they are tiny
- // compared to multipart data (aka multi-MB files) usually sent
- // in multipart/form-data requests.
-
- if size <= 0 {
- panic(fmt.Sprintf("BUG: form size must be greater than 0. Given %d", size))
- }
- lr := io.LimitReader(r, int64(size))
- mr := multipart.NewReader(lr, boundary)
- f, err := mr.ReadForm(int64(maxInMemoryFileSize))
- if err != nil {
- return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err)
- }
- return f, nil
-}
-
-// Reset clears request contents.
-func (req *Request) Reset() {
- req.Header.Reset()
- req.resetSkipHeader()
-}
-
-func (req *Request) resetSkipHeader() {
- req.ResetBody()
- req.uri.Reset()
- req.parsedURI = false
- req.postArgs.Reset()
- req.parsedPostArgs = false
- req.isTLS = false
-}
-
-// RemoveMultipartFormFiles removes multipart/form-data temporary files
-// associated with the request.
-func (req *Request) RemoveMultipartFormFiles() {
- if req.multipartForm != nil {
- // Do not check for error, since these files may be deleted or moved
- // to new places by user code.
- req.multipartForm.RemoveAll()
- req.multipartForm = nil
- }
- req.multipartFormBoundary = ""
-}
-
-// Reset clears response contents.
-func (resp *Response) Reset() {
- resp.Header.Reset()
- resp.resetSkipHeader()
- resp.SkipBody = false
-}
-
-func (resp *Response) resetSkipHeader() {
- resp.ResetBody()
-}
-
-// Read reads request (including body) from the given r.
-//
-// RemoveMultipartFormFiles or Reset must be called after
-// reading multipart/form-data request in order to delete temporarily
-// uploaded files.
-//
-// If MayContinue returns true, the caller must:
-//
-// - Either send StatusExpectationFailed response if request headers don't
-// satisfy the caller.
-// - Or send StatusContinue response before reading request body
-// with ContinueReadBody.
-// - Or close the connection.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (req *Request) Read(r *bufio.Reader) error {
- return req.ReadLimitBody(r, 0)
-}
-
-const defaultMaxInMemoryFileSize = 16 * 1024 * 1024
-
-var errGetOnly = errors.New("non-GET request received")
-
-// ReadLimitBody reads request from the given r, limiting the body size.
-//
-// If maxBodySize > 0 and the body size exceeds maxBodySize,
-// then ErrBodyTooLarge is returned.
-//
-// RemoveMultipartFormFiles or Reset must be called after
-// reading multipart/form-data request in order to delete temporarily
-// uploaded files.
-//
-// If MayContinue returns true, the caller must:
-//
-// - Either send StatusExpectationFailed response if request headers don't
-// satisfy the caller.
-// - Or send StatusContinue response before reading request body
-// with ContinueReadBody.
-// - Or close the connection.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
- req.resetSkipHeader()
- return req.readLimitBody(r, maxBodySize, false)
-}
-
-func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error {
- // Do not reset the request here - the caller must reset it before
- // calling this method.
-
- err := req.Header.Read(r)
- if err != nil {
- return err
- }
- if getOnly && !req.Header.IsGet() {
- return errGetOnly
- }
-
- if req.Header.noBody() {
- return nil
- }
-
- if req.MayContinue() {
-		// 'Expect: 100-continue' header found. Let the caller decide
-		// whether to read the request body or
-		// to return StatusExpectationFailed.
- return nil
- }
-
- return req.ContinueReadBody(r, maxBodySize)
-}
-
-// MayContinue returns true if the request contains
-// 'Expect: 100-continue' header.
-//
-// The caller must do one of the following actions if MayContinue returns true:
-//
-// - Either send StatusExpectationFailed response if request headers don't
-// satisfy the caller.
-// - Or send StatusContinue response before reading request body
-// with ContinueReadBody.
-// - Or close the connection.
-func (req *Request) MayContinue() bool {
- return bytes.Equal(req.Header.peek(strExpect), str100Continue)
-}
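
Putting the pieces together, a raw-connection server loop would handle `Expect: 100-continue` roughly as follows (a sketch, not part of the package; `br` and `bw` are assumed to wrap the same `net.Conn`, and the interim response line is written by hand):

```go
func readWithContinue(req *fasthttp.Request, br *bufio.Reader, bw *bufio.Writer, maxBodySize int) error {
	// Reads headers; the body is deferred when 'Expect: 100-continue' is set.
	if err := req.ReadLimitBody(br, maxBodySize); err != nil {
		return err
	}
	if !req.MayContinue() {
		return nil // body (if any) has already been read
	}
	// Approve the upload, then read the deferred body.
	if _, err := bw.WriteString("HTTP/1.1 100 Continue\r\n\r\n"); err != nil {
		return err
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	return req.ContinueReadBody(br, maxBodySize)
}
```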
-
-// ContinueReadBody reads request body if request header contains
-// 'Expect: 100-continue'.
-//
-// The caller must send StatusContinue response before calling this method.
-//
-// If maxBodySize > 0 and the body size exceeds maxBodySize,
-// then ErrBodyTooLarge is returned.
-func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error {
- var err error
- contentLength := req.Header.ContentLength()
- if contentLength > 0 {
- if maxBodySize > 0 && contentLength > maxBodySize {
- return ErrBodyTooLarge
- }
-
- // Pre-read multipart form data of known length.
-		// This way we limit memory usage for large file uploads, since their contents
-		// are streamed into temporary files if the file size exceeds defaultMaxInMemoryFileSize.
- req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
- if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 {
- req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize)
- if err != nil {
- req.Reset()
- }
- return err
- }
- }
-
- if contentLength == -2 {
-		// An identity body makes no sense for http requests, since
-		// the end of body is determined by connection close.
-		// So just ignore the request body for requests without
-		// 'Content-Length' and 'Transfer-Encoding' headers.
- req.Header.SetContentLength(0)
- return nil
- }
-
- bodyBuf := req.bodyBuffer()
- bodyBuf.Reset()
- bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B)
- if err != nil {
- req.Reset()
- return err
- }
- req.Header.SetContentLength(len(bodyBuf.B))
- return nil
-}
-
-// Read reads response (including body) from the given r.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (resp *Response) Read(r *bufio.Reader) error {
- return resp.ReadLimitBody(r, 0)
-}
-
-// ReadLimitBody reads response from the given r, limiting the body size.
-//
-// If maxBodySize > 0 and the body size exceeds maxBodySize,
-// then ErrBodyTooLarge is returned.
-//
-// io.EOF is returned if r is closed before reading the first header byte.
-func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
- resp.resetSkipHeader()
- err := resp.Header.Read(r)
- if err != nil {
- return err
- }
- if resp.Header.StatusCode() == StatusContinue {
- // Read the next response according to http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html .
- if err = resp.Header.Read(r); err != nil {
- return err
- }
- }
-
- if !resp.mustSkipBody() {
- bodyBuf := resp.bodyBuffer()
- bodyBuf.Reset()
- bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B)
- if err != nil {
- resp.Reset()
- return err
- }
- resp.Header.SetContentLength(len(bodyBuf.B))
- }
- return nil
-}
-
-func (resp *Response) mustSkipBody() bool {
- return resp.SkipBody || resp.Header.mustSkipContentLength()
-}
-
-var errRequestHostRequired = errors.New("missing required Host header in request")
-
-// WriteTo writes request to w. It implements io.WriterTo.
-func (req *Request) WriteTo(w io.Writer) (int64, error) {
- return writeBufio(req, w)
-}
-
-// WriteTo writes response to w. It implements io.WriterTo.
-func (resp *Response) WriteTo(w io.Writer) (int64, error) {
- return writeBufio(resp, w)
-}
-
-func writeBufio(hw httpWriter, w io.Writer) (int64, error) {
- sw := acquireStatsWriter(w)
- bw := acquireBufioWriter(sw)
- err1 := hw.Write(bw)
- err2 := bw.Flush()
- releaseBufioWriter(bw)
- n := sw.bytesWritten
- releaseStatsWriter(sw)
-
- err := err1
- if err == nil {
- err = err2
- }
- return n, err
-}
-
-type statsWriter struct {
- w io.Writer
- bytesWritten int64
-}
-
-func (w *statsWriter) Write(p []byte) (int, error) {
- n, err := w.w.Write(p)
- w.bytesWritten += int64(n)
- return n, err
-}
-
-func acquireStatsWriter(w io.Writer) *statsWriter {
- v := statsWriterPool.Get()
- if v == nil {
- return &statsWriter{
- w: w,
- }
- }
- sw := v.(*statsWriter)
- sw.w = w
- return sw
-}
-
-func releaseStatsWriter(sw *statsWriter) {
- sw.w = nil
- sw.bytesWritten = 0
- statsWriterPool.Put(sw)
-}
-
-var statsWriterPool sync.Pool
-
-func acquireBufioWriter(w io.Writer) *bufio.Writer {
- v := bufioWriterPool.Get()
- if v == nil {
- return bufio.NewWriter(w)
- }
- bw := v.(*bufio.Writer)
- bw.Reset(w)
- return bw
-}
-
-func releaseBufioWriter(bw *bufio.Writer) {
- bufioWriterPool.Put(bw)
-}
-
-var bufioWriterPool sync.Pool
-
-func (req *Request) onlyMultipartForm() bool {
- return req.multipartForm != nil && (req.body == nil || len(req.body.B) == 0)
-}
-
-// Write writes request to w.
-//
-// Write doesn't flush request to w for performance reasons.
-//
-// See also WriteTo.
-func (req *Request) Write(w *bufio.Writer) error {
- if len(req.Header.Host()) == 0 || req.parsedURI {
- uri := req.URI()
- host := uri.Host()
- if len(host) == 0 {
- return errRequestHostRequired
- }
- req.Header.SetHostBytes(host)
- req.Header.SetRequestURIBytes(uri.RequestURI())
- }
-
- if req.bodyStream != nil {
- return req.writeBodyStream(w)
- }
-
- body := req.bodyBytes()
- var err error
- if req.onlyMultipartForm() {
- body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary)
- if err != nil {
- return fmt.Errorf("error when marshaling multipart form: %s", err)
- }
- req.Header.SetMultipartFormBoundary(req.multipartFormBoundary)
- }
-
- hasBody := !req.Header.noBody()
- if hasBody {
- req.Header.SetContentLength(len(body))
- }
- if err = req.Header.Write(w); err != nil {
- return err
- }
- if hasBody {
- _, err = w.Write(body)
- } else if len(body) > 0 {
- return fmt.Errorf("non-zero body for non-POST request. body=%q", body)
- }
- return err
-}
-
-// WriteGzip writes response with gzipped body to w.
-//
-// The method gzips response body and sets 'Content-Encoding: gzip'
-// header before writing response to w.
-//
-// WriteGzip doesn't flush response to w for performance reasons.
-func (resp *Response) WriteGzip(w *bufio.Writer) error {
- return resp.WriteGzipLevel(w, CompressDefaultCompression)
-}
-
-// WriteGzipLevel writes response with gzipped body to w.
-//
-// Level is the desired compression level:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-//
-// The method gzips response body and sets 'Content-Encoding: gzip'
-// header before writing response to w.
-//
-// WriteGzipLevel doesn't flush response to w for performance reasons.
-func (resp *Response) WriteGzipLevel(w *bufio.Writer, level int) error {
- if err := resp.gzipBody(level); err != nil {
- return err
- }
- return resp.Write(w)
-}
-
-// WriteDeflate writes response with deflated body to w.
-//
-// The method deflates response body and sets 'Content-Encoding: deflate'
-// header before writing response to w.
-//
-// WriteDeflate doesn't flush response to w for performance reasons.
-func (resp *Response) WriteDeflate(w *bufio.Writer) error {
- return resp.WriteDeflateLevel(w, CompressDefaultCompression)
-}
-
-// WriteDeflateLevel writes response with deflated body to w.
-//
-// Level is the desired compression level:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-//
-// The method deflates response body and sets 'Content-Encoding: deflate'
-// header before writing response to w.
-//
-// WriteDeflateLevel doesn't flush response to w for performance reasons.
-func (resp *Response) WriteDeflateLevel(w *bufio.Writer, level int) error {
- if err := resp.deflateBody(level); err != nil {
- return err
- }
- return resp.Write(w)
-}
-
-func (resp *Response) gzipBody(level int) error {
- if len(resp.Header.peek(strContentEncoding)) > 0 {
- // It looks like the body is already compressed.
- // Do not compress it again.
- return nil
- }
-
- if !resp.Header.isCompressibleContentType() {
- // The content-type cannot be compressed.
- return nil
- }
-
- if resp.bodyStream != nil {
- // Reset Content-Length to -1, since it is impossible
-		// to determine the body size ahead of streamed compression.
- // For https://github.com/valyala/fasthttp/issues/176 .
- resp.Header.SetContentLength(-1)
-
- // Do not care about memory allocations here, since gzip is slow
- // and allocates a lot of memory by itself.
- bs := resp.bodyStream
- resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
- zw := acquireStacklessGzipWriter(sw, level)
- fw := &flushWriter{
- wf: zw,
- bw: sw,
- }
- copyZeroAlloc(fw, bs)
- releaseStacklessGzipWriter(zw, level)
- if bsc, ok := bs.(io.Closer); ok {
- bsc.Close()
- }
- })
- } else {
- bodyBytes := resp.bodyBytes()
- if len(bodyBytes) < minCompressLen {
- // There is no sense in spending CPU time on small body compression,
- // since there is a very high probability that the compressed
- // body size will be bigger than the original body size.
- return nil
- }
- w := responseBodyPool.Get()
- w.B = AppendGzipBytesLevel(w.B, bodyBytes, level)
-
- // Hack: swap resp.body with w.
- if resp.body != nil {
- responseBodyPool.Put(resp.body)
- }
- resp.body = w
- }
- resp.Header.SetCanonical(strContentEncoding, strGzip)
- return nil
-}
-
-func (resp *Response) deflateBody(level int) error {
- if len(resp.Header.peek(strContentEncoding)) > 0 {
- // It looks like the body is already compressed.
- // Do not compress it again.
- return nil
- }
-
- if !resp.Header.isCompressibleContentType() {
- // The content-type cannot be compressed.
- return nil
- }
-
- if resp.bodyStream != nil {
- // Reset Content-Length to -1, since it is impossible
-		// to determine the body size ahead of streamed compression.
- // For https://github.com/valyala/fasthttp/issues/176 .
- resp.Header.SetContentLength(-1)
-
- // Do not care about memory allocations here, since flate is slow
- // and allocates a lot of memory by itself.
- bs := resp.bodyStream
- resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) {
- zw := acquireStacklessDeflateWriter(sw, level)
- fw := &flushWriter{
- wf: zw,
- bw: sw,
- }
- copyZeroAlloc(fw, bs)
- releaseStacklessDeflateWriter(zw, level)
- if bsc, ok := bs.(io.Closer); ok {
- bsc.Close()
- }
- })
- } else {
- bodyBytes := resp.bodyBytes()
- if len(bodyBytes) < minCompressLen {
- // There is no sense in spending CPU time on small body compression,
- // since there is a very high probability that the compressed
- // body size will be bigger than the original body size.
- return nil
- }
- w := responseBodyPool.Get()
- w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level)
-
- // Hack: swap resp.body with w.
- if resp.body != nil {
- responseBodyPool.Put(resp.body)
- }
- resp.body = w
- }
- resp.Header.SetCanonical(strContentEncoding, strDeflate)
- return nil
-}
-
-// Bodies with sizes smaller than minCompressLen aren't compressed at all
-const minCompressLen = 200
-
-type writeFlusher interface {
- io.Writer
- Flush() error
-}
-
-type flushWriter struct {
- wf writeFlusher
- bw *bufio.Writer
-}
-
-func (w *flushWriter) Write(p []byte) (int, error) {
- n, err := w.wf.Write(p)
- if err != nil {
- return 0, err
- }
- if err = w.wf.Flush(); err != nil {
- return 0, err
- }
- if err = w.bw.Flush(); err != nil {
- return 0, err
- }
- return n, nil
-}
-
-// Write writes response to w.
-//
-// Write doesn't flush response to w for performance reasons.
-//
-// See also WriteTo.
-func (resp *Response) Write(w *bufio.Writer) error {
- sendBody := !resp.mustSkipBody()
-
- if resp.bodyStream != nil {
- return resp.writeBodyStream(w, sendBody)
- }
-
- body := resp.bodyBytes()
- bodyLen := len(body)
- if sendBody || bodyLen > 0 {
- resp.Header.SetContentLength(bodyLen)
- }
- if err := resp.Header.Write(w); err != nil {
- return err
- }
- if sendBody {
- if _, err := w.Write(body); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (req *Request) writeBodyStream(w *bufio.Writer) error {
- var err error
-
- contentLength := req.Header.ContentLength()
- if contentLength < 0 {
- lrSize := limitedReaderSize(req.bodyStream)
- if lrSize >= 0 {
- contentLength = int(lrSize)
- if int64(contentLength) != lrSize {
- contentLength = -1
- }
- if contentLength >= 0 {
- req.Header.SetContentLength(contentLength)
- }
- }
- }
- if contentLength >= 0 {
- if err = req.Header.Write(w); err == nil {
- err = writeBodyFixedSize(w, req.bodyStream, int64(contentLength))
- }
- } else {
- req.Header.SetContentLength(-1)
- if err = req.Header.Write(w); err == nil {
- err = writeBodyChunked(w, req.bodyStream)
- }
- }
- err1 := req.closeBodyStream()
- if err == nil {
- err = err1
- }
- return err
-}
-
-func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) error {
- var err error
-
- contentLength := resp.Header.ContentLength()
- if contentLength < 0 {
- lrSize := limitedReaderSize(resp.bodyStream)
- if lrSize >= 0 {
- contentLength = int(lrSize)
- if int64(contentLength) != lrSize {
- contentLength = -1
- }
- if contentLength >= 0 {
- resp.Header.SetContentLength(contentLength)
- }
- }
- }
- if contentLength >= 0 {
- if err = resp.Header.Write(w); err == nil && sendBody {
- err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength))
- }
- } else {
- resp.Header.SetContentLength(-1)
- if err = resp.Header.Write(w); err == nil && sendBody {
- err = writeBodyChunked(w, resp.bodyStream)
- }
- }
- err1 := resp.closeBodyStream()
- if err == nil {
- err = err1
- }
- return err
-}
-
-func (req *Request) closeBodyStream() error {
- if req.bodyStream == nil {
- return nil
- }
- var err error
- if bsc, ok := req.bodyStream.(io.Closer); ok {
- err = bsc.Close()
- }
- req.bodyStream = nil
- return err
-}
-
-func (resp *Response) closeBodyStream() error {
- if resp.bodyStream == nil {
- return nil
- }
- var err error
- if bsc, ok := resp.bodyStream.(io.Closer); ok {
- err = bsc.Close()
- }
- resp.bodyStream = nil
- return err
-}
-
-// String returns request representation.
-//
-// Returns error message instead of request representation on error.
-//
-// Use Write instead of String for performance-critical code.
-func (req *Request) String() string {
- return getHTTPString(req)
-}
-
-// String returns response representation.
-//
-// Returns error message instead of response representation on error.
-//
-// Use Write instead of String for performance-critical code.
-func (resp *Response) String() string {
- return getHTTPString(resp)
-}
-
-func getHTTPString(hw httpWriter) string {
- w := AcquireByteBuffer()
- bw := bufio.NewWriter(w)
- if err := hw.Write(bw); err != nil {
- return err.Error()
- }
- if err := bw.Flush(); err != nil {
- return err.Error()
- }
- s := string(w.B)
- ReleaseByteBuffer(w)
- return s
-}
-
-type httpWriter interface {
- Write(w *bufio.Writer) error
-}
-
-func writeBodyChunked(w *bufio.Writer, r io.Reader) error {
- bufv := copyBufPool.Get().(*copyBuf)
- buf := bufv.b[:]
-
- var err error
- var n int
- for {
- n, err = r.Read(buf)
- if n == 0 {
- if err == nil {
- panic("BUG: io.Reader returned 0, nil")
- }
- if err == io.EOF {
- if err = writeChunk(w, buf[:0]); err != nil {
- break
- }
- err = nil
- }
- break
- }
- if err = writeChunk(w, buf[:n]); err != nil {
- break
- }
- }
-
- copyBufPool.Put(bufv)
- return err
-}
-
-func limitedReaderSize(r io.Reader) int64 {
- lr, ok := r.(*io.LimitedReader)
- if !ok {
- return -1
- }
- return lr.N
-}
-
-func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error {
- if size > maxSmallFileSize {
- // w buffer must be empty for triggering
- // sendfile path in bufio.Writer.ReadFrom.
- if err := w.Flush(); err != nil {
- return err
- }
- }
-
- // Unwrap a single limited reader for triggering sendfile path
- // in net.TCPConn.ReadFrom.
- lr, ok := r.(*io.LimitedReader)
- if ok {
- r = lr.R
- }
-
- n, err := copyZeroAlloc(w, r)
-
- if ok {
- lr.N -= n
- }
-
- if n != size && err == nil {
- err = fmt.Errorf("copied %d bytes from body stream instead of %d bytes", n, size)
- }
- return err
-}
-
-func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) {
- buf := copyBufPool.Get().(*copyBuf)
- n, err := io.CopyBuffer(w, r, buf.b[:])
- copyBufPool.Put(buf)
- return n, err
-}
-
-type copyBuf struct {
- b [4 * 4096]byte
-}
-
-var copyBufPool = sync.Pool{
- New: func() interface{} {
-		return &copyBuf{}
- },
-}
-
-func writeChunk(w *bufio.Writer, b []byte) error {
- n := len(b)
- writeHexInt(w, n)
- w.Write(strCRLF)
- w.Write(b)
- _, err := w.Write(strCRLF)
- err1 := w.Flush()
- if err == nil {
- err = err1
- }
- return err
-}
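-
-// As a worked example of the chunk framing above: writeChunk(w, []byte("hello"))
-// emits "5\r\nhello\r\n" on the wire, and the terminating zero-length chunk
-// written by writeBodyChunked is emitted as "0\r\n\r\n", which is how
-// HTTP/1.1 chunked transfer encoding marks the end of the body.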
-
-// ErrBodyTooLarge is returned if either request or response body exceeds
-// the given limit.
-var ErrBodyTooLarge = errors.New("body size exceeds the given limit")
-
-func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) {
- dst = dst[:0]
- if contentLength >= 0 {
- if maxBodySize > 0 && contentLength > maxBodySize {
- return dst, ErrBodyTooLarge
- }
- return appendBodyFixedSize(r, dst, contentLength)
- }
- if contentLength == -1 {
- return readBodyChunked(r, maxBodySize, dst)
- }
- return readBodyIdentity(r, maxBodySize, dst)
-}
-
-func readBodyIdentity(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) {
- dst = dst[:cap(dst)]
- if len(dst) == 0 {
- dst = make([]byte, 1024)
- }
- offset := 0
- for {
- nn, err := r.Read(dst[offset:])
- if nn <= 0 {
- if err != nil {
- if err == io.EOF {
- return dst[:offset], nil
- }
- return dst[:offset], err
- }
- panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn))
- }
- offset += nn
- if maxBodySize > 0 && offset > maxBodySize {
- return dst[:offset], ErrBodyTooLarge
- }
- if len(dst) == offset {
- n := round2(2 * offset)
- if maxBodySize > 0 && n > maxBodySize {
- n = maxBodySize + 1
- }
- b := make([]byte, n)
- copy(b, dst)
- dst = b
- }
- }
-}
-
-func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) {
- if n == 0 {
- return dst, nil
- }
-
- offset := len(dst)
- dstLen := offset + n
- if cap(dst) < dstLen {
- b := make([]byte, round2(dstLen))
- copy(b, dst)
- dst = b
- }
- dst = dst[:dstLen]
-
- for {
- nn, err := r.Read(dst[offset:])
- if nn <= 0 {
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return dst[:offset], err
- }
- panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn))
- }
- offset += nn
- if offset == dstLen {
- return dst, nil
- }
- }
-}
-
-func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) {
- if len(dst) > 0 {
- panic("BUG: expected zero-length buffer")
- }
-
- strCRLFLen := len(strCRLF)
- for {
- chunkSize, err := parseChunkSize(r)
- if err != nil {
- return dst, err
- }
- if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize {
- return dst, ErrBodyTooLarge
- }
- dst, err = appendBodyFixedSize(r, dst, chunkSize+strCRLFLen)
- if err != nil {
- return dst, err
- }
- if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) {
- return dst, fmt.Errorf("cannot find crlf at the end of chunk")
- }
- dst = dst[:len(dst)-strCRLFLen]
- if chunkSize == 0 {
- return dst, nil
- }
- }
-}
-
-func parseChunkSize(r *bufio.Reader) (int, error) {
- n, err := readHexInt(r)
- if err != nil {
- return -1, err
- }
- c, err := r.ReadByte()
- if err != nil {
- return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err)
- }
- if c != '\r' {
- return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r')
- }
- c, err = r.ReadByte()
- if err != nil {
- return -1, fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err)
- }
- if c != '\n' {
- return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\n')
- }
- return n, nil
-}
-
-func round2(n int) int {
- if n <= 0 {
- return 0
- }
- n--
- x := uint(0)
- for n > 0 {
- n >>= 1
- x++
- }
- return 1 << x
-}
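-
-// As a worked example: round2(1000) returns 1024 and round2(1024) returns 1024,
-// i.e. the result is the smallest power of two that is not less than n.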
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/lbclient.go b/vendor/github.com/VictoriaMetrics/fasthttp/lbclient.go
deleted file mode 100644
index 41fe727f9..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/lbclient.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package fasthttp
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// BalancingClient is the interface for clients, which may be passed
-// to LBClient.Clients.
-type BalancingClient interface {
- DoDeadline(req *Request, resp *Response, deadline time.Time) error
- PendingRequests() int
-}
-
-// LBClient balances requests among available LBClient.Clients.
-//
-// It has the following features:
-//
-// - Balances load among available clients using 'least loaded' + 'round robin'
-// hybrid technique.
-// - Dynamically decreases load on unhealthy clients.
-//
-// Copying LBClient instances is forbidden. Create new instances instead.
-//
-// It is safe to call LBClient methods from concurrently running goroutines.
-type LBClient struct {
- noCopy noCopy
-
-	// Clients must contain a non-empty list of clients.
- // Incoming requests are balanced among these clients.
- Clients []BalancingClient
-
- // HealthCheck is a callback called after each request.
- //
- // The request, response and the error returned by the client
-	// are passed to HealthCheck, so the callback may determine whether
- // the client is healthy.
- //
- // Load on the current client is decreased if HealthCheck returns false.
- //
- // By default HealthCheck returns false if err != nil.
- HealthCheck func(req *Request, resp *Response, err error) bool
-
- // Timeout is the request timeout used when calling LBClient.Do.
- //
- // DefaultLBClientTimeout is used by default.
- Timeout time.Duration
-
- cs []*lbClient
-
- // nextIdx is for spreading requests among equally loaded clients
- // in a round-robin fashion.
- nextIdx uint32
-
- once sync.Once
-}
-
-// DefaultLBClientTimeout is the default request timeout used by LBClient
-// when calling LBClient.Do.
-//
-// The timeout may be overridden via LBClient.Timeout.
-const DefaultLBClientTimeout = time.Second
-
-// DoDeadline calls DoDeadline on the least loaded client.
-func (cc *LBClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- return cc.get().DoDeadline(req, resp, deadline)
-}
-
-// DoTimeout calculates the deadline and calls DoDeadline on the least loaded client.
-func (cc *LBClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
- deadline := time.Now().Add(timeout)
- return cc.get().DoDeadline(req, resp, deadline)
-}
-
-// Do calculates the deadline using LBClient.Timeout and calls DoDeadline
-// on the least loaded client.
-func (cc *LBClient) Do(req *Request, resp *Response) error {
- timeout := cc.Timeout
- if timeout <= 0 {
- timeout = DefaultLBClientTimeout
- }
- return cc.DoTimeout(req, resp, timeout)
-}
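-
-// A minimal usage sketch (the backend addresses are placeholders; it assumes
-// a BalancingClient implementation such as HostClient is available):
-//
-//	lbc := &LBClient{
-//		Clients: []BalancingClient{
-//			&HostClient{Addr: "backend-1:80"},
-//			&HostClient{Addr: "backend-2:80"},
-//		},
-//		Timeout: 5 * time.Second,
-//	}
-//	var req Request
-//	var resp Response
-//	req.SetRequestURI("http://backend-1/health")
-//	if err := lbc.Do(&req, &resp); err != nil {
-//		log.Fatalf("request failed: %s", err)
-//	}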
-
-func (cc *LBClient) init() {
- if len(cc.Clients) == 0 {
- panic("BUG: LBClient.Clients cannot be empty")
- }
- for _, c := range cc.Clients {
- cc.cs = append(cc.cs, &lbClient{
- c: c,
- healthCheck: cc.HealthCheck,
- })
- }
-
-	// Randomize nextIdx in order to prevent a cluster of identical
-	// LBClients from hammering the same servers on startup.
- cc.nextIdx = uint32(time.Now().UnixNano())
-}
-
-func (cc *LBClient) get() *lbClient {
- cc.once.Do(cc.init)
-
- cs := cc.cs
- idx := atomic.AddUint32(&cc.nextIdx, 1)
- idx %= uint32(len(cs))
-
- minC := cs[idx]
- minN := minC.PendingRequests()
- if minN == 0 {
- return minC
- }
- for _, c := range cs[idx+1:] {
- n := c.PendingRequests()
- if n == 0 {
- return c
- }
- if n < minN {
- minC = c
- minN = n
- }
- }
- for _, c := range cs[:idx] {
- n := c.PendingRequests()
- if n == 0 {
- return c
- }
- if n < minN {
- minC = c
- minN = n
- }
- }
- return minC
-}
-
-type lbClient struct {
- c BalancingClient
- healthCheck func(req *Request, resp *Response, err error) bool
- penalty uint32
-}
-
-func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
- err := c.c.DoDeadline(req, resp, deadline)
- if !c.isHealthy(req, resp, err) && c.incPenalty() {
-		// Penalize the client returning the error, so subsequent requests
-		// are routed to other clients.
- time.AfterFunc(penaltyDuration, c.decPenalty)
- }
- return err
-}
-
-func (c *lbClient) PendingRequests() int {
- n := c.c.PendingRequests()
- m := atomic.LoadUint32(&c.penalty)
- return n + int(m)
-}
-
-func (c *lbClient) isHealthy(req *Request, resp *Response, err error) bool {
- if c.healthCheck == nil {
- return err == nil
- }
- return c.healthCheck(req, resp, err)
-}
-
-func (c *lbClient) incPenalty() bool {
- m := atomic.AddUint32(&c.penalty, 1)
- if m > maxPenalty {
- c.decPenalty()
- return false
- }
- return true
-}
-
-func (c *lbClient) decPenalty() {
- atomic.AddUint32(&c.penalty, ^uint32(0))
-}
-
-const (
- maxPenalty = 300
-
- penaltyDuration = 3 * time.Second
-)
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/nocopy.go b/vendor/github.com/VictoriaMetrics/fasthttp/nocopy.go
deleted file mode 100644
index 32af52e43..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/nocopy.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package fasthttp
-
-// Embed this type into a struct that mustn't be copied,
-// so `go vet` gives a warning if the struct is copied.
-//
-// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details.
-type noCopy struct{}
-
-func (*noCopy) Lock() {}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/peripconn.go b/vendor/github.com/VictoriaMetrics/fasthttp/peripconn.go
deleted file mode 100644
index afd2a9270..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/peripconn.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package fasthttp
-
-import (
- "fmt"
- "net"
- "sync"
-)
-
-type perIPConnCounter struct {
- pool sync.Pool
- lock sync.Mutex
- m map[uint32]int
-}
-
-func (cc *perIPConnCounter) Register(ip uint32) int {
- cc.lock.Lock()
- if cc.m == nil {
- cc.m = make(map[uint32]int)
- }
- n := cc.m[ip] + 1
- cc.m[ip] = n
- cc.lock.Unlock()
- return n
-}
-
-func (cc *perIPConnCounter) Unregister(ip uint32) {
- cc.lock.Lock()
- if cc.m == nil {
- cc.lock.Unlock()
- panic("BUG: perIPConnCounter.Register() wasn't called")
- }
- n := cc.m[ip] - 1
- if n < 0 {
- cc.lock.Unlock()
- panic(fmt.Sprintf("BUG: negative per-ip counter=%d for ip=%d", n, ip))
- }
- cc.m[ip] = n
- cc.lock.Unlock()
-}
-
-type perIPConn struct {
- net.Conn
-
- ip uint32
- perIPConnCounter *perIPConnCounter
-}
-
-func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) *perIPConn {
- v := counter.pool.Get()
- if v == nil {
- v = &perIPConn{
- perIPConnCounter: counter,
- }
- }
- c := v.(*perIPConn)
- c.Conn = conn
- c.ip = ip
- return c
-}
-
-func releasePerIPConn(c *perIPConn) {
- c.Conn = nil
- c.perIPConnCounter.pool.Put(c)
-}
-
-func (c *perIPConn) Close() error {
- err := c.Conn.Close()
- c.perIPConnCounter.Unregister(c.ip)
- releasePerIPConn(c)
- return err
-}
-
-func getUint32IP(c net.Conn) uint32 {
- return ip2uint32(getConnIP4(c))
-}
-
-func getConnIP4(c net.Conn) net.IP {
- addr := c.RemoteAddr()
- ipAddr, ok := addr.(*net.TCPAddr)
- if !ok {
- return net.IPv4zero
- }
- return ipAddr.IP.To4()
-}
-
-func ip2uint32(ip net.IP) uint32 {
- if len(ip) != 4 {
- return 0
- }
- return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])
-}
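-
-// For example, ip2uint32(net.IPv4(1, 2, 3, 4).To4()) returns 0x01020304,
-// and uint322ip(0x01020304) converts it back to the IP "1.2.3.4".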
-
-func uint322ip(ip uint32) net.IP {
- b := make([]byte, 4)
- b[0] = byte(ip >> 24)
- b[1] = byte(ip >> 16)
- b[2] = byte(ip >> 8)
- b[3] = byte(ip)
- return b
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/server.go b/vendor/github.com/VictoriaMetrics/fasthttp/server.go
deleted file mode 100644
index 309c78b92..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/server.go
+++ /dev/null
@@ -1,1981 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "log"
- "mime/multipart"
- "net"
- "os"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// ServeConn serves HTTP requests from the given connection
-// using the given handler.
-//
-// ServeConn returns nil if all requests from c are successfully served.
-// It returns non-nil error otherwise.
-//
-// Connection c must immediately propagate all the data passed to Write()
-// to the client. Otherwise request processing may hang.
-//
-// ServeConn closes c before returning.
-func ServeConn(c net.Conn, handler RequestHandler) error {
- v := serverPool.Get()
- if v == nil {
- v = &Server{}
- }
- s := v.(*Server)
- s.Handler = handler
- err := s.ServeConn(c)
- s.Handler = nil
- serverPool.Put(v)
- return err
-}
-
-var serverPool sync.Pool
-
-// Serve serves incoming connections from the given listener
-// using the given handler.
-//
-// Serve blocks until the given listener returns a permanent error.
-func Serve(ln net.Listener, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.Serve(ln)
-}
-
-// ServeTLS serves HTTPS requests from the given net.Listener
-// using the given handler.
-//
-// certFile and keyFile are paths to TLS certificate and key files.
-func ServeTLS(ln net.Listener, certFile, keyFile string, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ServeTLS(ln, certFile, keyFile)
-}
-
-// ServeTLSEmbed serves HTTPS requests from the given net.Listener
-// using the given handler.
-//
-// certData and keyData must contain valid TLS certificate and key data.
-func ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ServeTLSEmbed(ln, certData, keyData)
-}
-
-// ListenAndServe serves HTTP requests from the given TCP addr
-// using the given handler.
-func ListenAndServe(addr string, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ListenAndServe(addr)
-}
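-
-// A minimal usage sketch (the address and response body are placeholders):
-//
-//	err := ListenAndServe(":8080", func(ctx *RequestCtx) {
-//		fmt.Fprintf(ctx, "requested path is %q", ctx.Path())
-//	})
-//	if err != nil {
-//		log.Fatalf("error in ListenAndServe: %s", err)
-//	}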
-
-// ListenAndServeUNIX serves HTTP requests from the given UNIX addr
-// using the given handler.
-//
-// The function deletes the existing file at addr before it starts serving.
-//
-// The server sets the given file mode for the UNIX addr.
-func ListenAndServeUNIX(addr string, mode os.FileMode, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ListenAndServeUNIX(addr, mode)
-}
-
-// ListenAndServeTLS serves HTTPS requests from the given TCP addr
-// using the given handler.
-//
-// certFile and keyFile are paths to TLS certificate and key files.
-func ListenAndServeTLS(addr, certFile, keyFile string, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ListenAndServeTLS(addr, certFile, keyFile)
-}
-
-// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP addr
-// using the given handler.
-//
-// certData and keyData must contain valid TLS certificate and key data.
-func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler RequestHandler) error {
- s := &Server{
- Handler: handler,
- }
- return s.ListenAndServeTLSEmbed(addr, certData, keyData)
-}
-
-// RequestHandler must process incoming requests.
-//
-// RequestHandler must call ctx.TimeoutError() before returning
-// if it keeps references to ctx and/or its members after the return.
-// Consider wrapping RequestHandler into TimeoutHandler if response time
-// must be limited.
-type RequestHandler func(ctx *RequestCtx)
-
-// Server implements HTTP server.
-//
-// Default Server settings should satisfy the majority of Server users.
-// Adjust Server settings only if you really understand the consequences.
-//
-// Copying Server instances is forbidden. Create new Server instances
-// instead.
-//
-// It is safe to call Server methods from concurrently running goroutines.
-type Server struct {
- noCopy noCopy
-
- // Handler for processing incoming requests.
- Handler RequestHandler
-
- // Server name for sending in response headers.
- //
- // Default server name is used if left blank.
- Name string
-
- // The maximum number of concurrent connections the server may serve.
- //
- // DefaultConcurrency is used if not set.
- Concurrency int
-
- // Whether to disable keep-alive connections.
- //
- // The server will close all the incoming connections after sending
- // the first response to client if this option is set to true.
- //
- // By default keep-alive connections are enabled.
- DisableKeepalive bool
-
- // Per-connection buffer size for requests' reading.
- // This also limits the maximum header size.
- //
- // Increase this buffer if your clients send multi-KB RequestURIs
- // and/or multi-KB headers (for example, BIG cookies).
- //
- // Default buffer size is used if not set.
- ReadBufferSize int
-
- // Per-connection buffer size for responses' writing.
- //
- // Default buffer size is used if not set.
- WriteBufferSize int
-
- // Maximum duration for reading the full request (including body).
- //
- // This also limits the maximum duration for idle keep-alive
- // connections.
- //
- // By default request read timeout is unlimited.
- ReadTimeout time.Duration
-
- // Maximum duration for writing the full response (including body).
- //
- // By default response write timeout is unlimited.
- WriteTimeout time.Duration
-
- // Maximum number of concurrent client connections allowed per IP.
- //
-	// By default, an unlimited number of concurrent connections
- // may be established to the server from a single IP address.
- MaxConnsPerIP int
-
- // Maximum number of requests served per connection.
- //
-	// The server closes the connection after the last request.
- // 'Connection: close' header is added to the last response.
- //
-	// By default, an unlimited number of requests may be served per connection.
- MaxRequestsPerConn int
-
- // Maximum keep-alive connection lifetime.
- //
-	// The server closes a keep-alive connection after its lifetime
-	// expires.
- //
- // See also ReadTimeout for limiting the duration of idle keep-alive
- // connections.
- //
- // By default keep-alive connection lifetime is unlimited.
- MaxKeepaliveDuration time.Duration
-
- // Maximum request body size.
- //
- // The server rejects requests with bodies exceeding this limit.
- //
- // Request body size is limited by DefaultMaxRequestBodySize by default.
- MaxRequestBodySize int
-
- // Aggressively reduces memory usage at the cost of higher CPU usage
- // if set to true.
- //
- // Try enabling this option only if the server consumes too much memory
- // serving mostly idle keep-alive connections. This may reduce memory
- // usage by more than 50%.
- //
- // Aggressive memory usage reduction is disabled by default.
- ReduceMemoryUsage bool
-
- // Rejects all non-GET requests if set to true.
- //
- // This option is useful as anti-DoS protection for servers
- // accepting only GET requests. The request size is limited
- // by ReadBufferSize if GetOnly is set.
- //
- // Server accepts all the requests by default.
- GetOnly bool
-
- // Logs all errors, including the most frequent
- // 'connection reset by peer', 'broken pipe' and 'connection timeout'
- // errors. Such errors are common in production serving real-world
- // clients.
- //
- // By default the most frequent errors such as
- // 'connection reset by peer', 'broken pipe' and 'connection timeout'
- // are suppressed in order to limit output log traffic.
- LogAllErrors bool
-
- // Logger, which is used by RequestCtx.Logger().
- //
- // By default standard logger from log package is used.
- Logger Logger
-
- concurrency uint32
- concurrencyCh chan struct{}
- perIPConnCounter perIPConnCounter
- serverName atomic.Value
-
- ctxPool sync.Pool
- readerPool sync.Pool
- writerPool sync.Pool
- hijackConnPool sync.Pool
- bytePool sync.Pool
-}
-
-// TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout
-// error with the given msg to the client if h didn't return during
-// the given duration.
-//
-// The returned handler may return a StatusTooManyRequests error with the given
-// msg to the client if more than Server.Concurrency concurrent
-// handlers h are running at the moment.
-func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler {
- if timeout <= 0 {
- return h
- }
-
- return func(ctx *RequestCtx) {
- concurrencyCh := ctx.s.concurrencyCh
- select {
- case concurrencyCh <- struct{}{}:
- default:
- ctx.Error(msg, StatusTooManyRequests)
- return
- }
-
- ch := ctx.timeoutCh
- if ch == nil {
- ch = make(chan struct{}, 1)
- ctx.timeoutCh = ch
- }
- go func() {
- h(ctx)
- ch <- struct{}{}
- <-concurrencyCh
- }()
- ctx.timeoutTimer = initTimer(ctx.timeoutTimer, timeout)
- select {
- case <-ch:
- case <-ctx.timeoutTimer.C:
- ctx.TimeoutError(msg)
- }
- stopTimer(ctx.timeoutTimer)
- }
-}
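-
-// A minimal usage sketch, returning a 408 response to the client when the
-// wrapped handler doesn't finish within 3 seconds (the handler body and
-// message are placeholders):
-//
-//	slow := func(ctx *RequestCtx) {
-//		time.Sleep(10 * time.Second) // simulate slow work
-//		ctx.SetBodyString("done")
-//	}
-//	s := &Server{
-//		Handler: TimeoutHandler(slow, 3*time.Second, "request timed out"),
-//	}
-//	log.Fatal(s.ListenAndServe(":8080"))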
-
-// CompressHandler returns RequestHandler that transparently compresses
-// response body generated by h if the request contains 'gzip' or 'deflate'
-// 'Accept-Encoding' header.
-func CompressHandler(h RequestHandler) RequestHandler {
- return CompressHandlerLevel(h, CompressDefaultCompression)
-}
-
-// CompressHandlerLevel returns RequestHandler that transparently compresses
-// response body generated by h if the request contains 'gzip' or 'deflate'
-// 'Accept-Encoding' header.
-//
-// Level is the desired compression level:
-//
-// - CompressNoCompression
-// - CompressBestSpeed
-// - CompressBestCompression
-// - CompressDefaultCompression
-// - CompressHuffmanOnly
-func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
- return func(ctx *RequestCtx) {
- h(ctx)
- ce := ctx.Response.Header.PeekBytes(strContentEncoding)
- if len(ce) > 0 {
- // Do not compress responses with non-empty
- // Content-Encoding.
- return
- }
- if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
- ctx.Response.gzipBody(level)
- } else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) {
- ctx.Response.deflateBody(level)
- }
- }
-}
-
-// RequestCtx contains incoming request and manages outgoing response.
-//
-// Copying RequestCtx instances is forbidden.
-//
-// RequestHandler should avoid holding references to incoming RequestCtx and/or
-// its members after the return.
-// If holding RequestCtx references after the return is unavoidable
-// (for instance, ctx is passed to a separate goroutine and ctx lifetime cannot
-// be controlled), then the RequestHandler MUST call ctx.TimeoutError()
-// before return.
-//
-// It is unsafe to modify or read a RequestCtx instance from concurrently
-// running goroutines. The only exception is TimeoutError*, which may be called
-// while other goroutines are accessing RequestCtx.
-type RequestCtx struct {
- noCopy noCopy
-
- // Incoming request.
- //
- // Copying Request by value is forbidden. Use pointer to Request instead.
- Request Request
-
- // Outgoing response.
- //
- // Copying Response by value is forbidden. Use pointer to Response instead.
- Response Response
-
- userValues userData
-
- lastReadDuration time.Duration
-
- connID uint64
- connRequestNum uint64
- connTime time.Time
-
- time time.Time
-
- logger ctxLogger
- s *Server
- c net.Conn
- fbr firstByteReader
-
- timeoutResponse *Response
- timeoutCh chan struct{}
- timeoutTimer *time.Timer
-
- hijackHandler HijackHandler
-}
-
-// HijackHandler must process the hijacked connection c.
-//
-// The connection c is automatically closed after returning from HijackHandler.
-//
-// The connection c must not be used after returning from the handler.
-type HijackHandler func(c net.Conn)
-
-// Hijack registers the given handler for connection hijacking.
-//
-// The handler is called after returning from RequestHandler
-// and sending http response. The current connection is passed
-// to the handler. The connection is automatically closed after
-// returning from the handler.
-//
-// The server skips calling the handler in the following cases:
-//
-// - 'Connection: close' header exists in either request or response.
-// - Unexpected error during response writing to the connection.
-//
-// The server stops processing requests from hijacked connections.
-// Server limits such as Concurrency, ReadTimeout, WriteTimeout, etc.
-// aren't applied to hijacked connections.
-//
-// The handler must not retain references to ctx members.
-//
-// Arbitrary 'Connection: Upgrade' protocols may be implemented
-// with HijackHandler. For instance,
-//
-// - WebSocket ( https://en.wikipedia.org/wiki/WebSocket )
-// - HTTP/2.0 ( https://en.wikipedia.org/wiki/HTTP/2 )
-func (ctx *RequestCtx) Hijack(handler HijackHandler) {
- ctx.hijackHandler = handler
-}
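-
-// A minimal hijack sketch implementing a trivial echo protocol once the
-// HTTP response has been sent (the handler name is a placeholder):
-//
-//	func handler(ctx *RequestCtx) {
-//		ctx.SetBodyString("switching to echo mode")
-//		ctx.Hijack(func(c net.Conn) {
-//			// Echo bytes back until the client disconnects; the connection
-//			// is closed automatically after the hijack handler returns.
-//			io.Copy(c, c)
-//		})
-//	}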
-
-// Hijacked returns true after Hijack is called.
-func (ctx *RequestCtx) Hijacked() bool {
- return ctx.hijackHandler != nil
-}
-
-// SetUserValue stores the given value (arbitrary object)
-// under the given key in ctx.
-//
-// The value stored in ctx may be obtained by UserValue*.
-//
-// This functionality may be useful for passing arbitrary values between
-// functions involved in request processing.
-//
-// All the values are removed from ctx after returning from the top
-// RequestHandler. Additionally, Close method is called on each value
-// implementing io.Closer before removing the value from ctx.
-func (ctx *RequestCtx) SetUserValue(key string, value interface{}) {
- ctx.userValues.Set(key, value)
-}
-
-// SetUserValueBytes stores the given value (arbitrary object)
-// under the given key in ctx.
-//
-// The value stored in ctx may be obtained by UserValue*.
-//
-// This functionality may be useful for passing arbitrary values between
-// functions involved in request processing.
-//
-// All the values stored in ctx are deleted after returning from RequestHandler.
-func (ctx *RequestCtx) SetUserValueBytes(key []byte, value interface{}) {
- ctx.userValues.SetBytes(key, value)
-}
-
-// UserValue returns the value stored via SetUserValue* under the given key.
-func (ctx *RequestCtx) UserValue(key string) interface{} {
- return ctx.userValues.Get(key)
-}
-
-// UserValueBytes returns the value stored via SetUserValue*
-// under the given key.
-func (ctx *RequestCtx) UserValueBytes(key []byte) interface{} {
- return ctx.userValues.GetBytes(key)
-}
-
-// VisitUserValues calls visitor for each existing userValue.
-//
-// visitor must not retain references to key and value after returning.
-// Make key and/or value copies if you need storing them after returning.
-func (ctx *RequestCtx) VisitUserValues(visitor func([]byte, interface{})) {
- for i, n := 0, len(ctx.userValues); i < n; i++ {
- kv := &ctx.userValues[i]
- visitor(kv.key, kv.value)
- }
-}
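-
-// For example, a wrapping handler may stash a value that an inner handler
-// later reads (the key and value are placeholders):
-//
-//	wrapped := func(ctx *RequestCtx) {
-//		ctx.SetUserValue("requestID", "abc123")
-//		inner(ctx)
-//	}
-//
-// and inside inner:
-//
-//	id, _ := ctx.UserValue("requestID").(string)
-//	ctx.SetBodyString(id)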
-
-type connTLSer interface {
- ConnectionState() tls.ConnectionState
-}
-
-// IsTLS returns true if the underlying connection is tls.Conn.
-//
-// tls.Conn is an encrypted connection (aka SSL, HTTPS).
-func (ctx *RequestCtx) IsTLS() bool {
- // cast to (connTLSer) instead of (*tls.Conn), since it catches
- // cases with overridden tls.Conn such as:
- //
- // type customConn struct {
- // *tls.Conn
- //
- // // other custom fields here
- // }
- _, ok := ctx.c.(connTLSer)
- return ok
-}
-
-// TLSConnectionState returns TLS connection state.
-//
-// The function returns nil if the underlying connection isn't tls.Conn.
-//
-// The returned state may be used for verifying TLS version, client certificates,
-// etc.
-func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState {
- tlsConn, ok := ctx.c.(connTLSer)
- if !ok {
- return nil
- }
- state := tlsConn.ConnectionState()
- return &state
-}
-
-type firstByteReader struct {
- c net.Conn
- ch byte
- byteRead bool
-}
-
-func (r *firstByteReader) Read(b []byte) (int, error) {
- if len(b) == 0 {
- return 0, nil
- }
- nn := 0
- if !r.byteRead {
- b[0] = r.ch
- b = b[1:]
- r.byteRead = true
- nn = 1
- }
- n, err := r.c.Read(b)
- return n + nn, err
-}
-
-// Logger is used for logging formatted messages.
-type Logger interface {
- // Printf must have the same semantics as log.Printf.
- Printf(format string, args ...interface{})
-}
-
-var ctxLoggerLock sync.Mutex
-
-type ctxLogger struct {
- ctx *RequestCtx
- logger Logger
-}
-
-func (cl *ctxLogger) Printf(format string, args ...interface{}) {
- ctxLoggerLock.Lock()
- msg := fmt.Sprintf(format, args...)
- ctx := cl.ctx
- cl.logger.Printf("%.3f %s - %s", time.Since(ctx.Time()).Seconds(), ctx.String(), msg)
- ctxLoggerLock.Unlock()
-}
-
-var zeroTCPAddr = &net.TCPAddr{
- IP: net.IPv4zero,
-}
-
-// String returns unique string representation of the ctx.
-//
-// The returned value may be useful for logging.
-func (ctx *RequestCtx) String() string {
- return fmt.Sprintf("#%016X - %s<->%s - %s %s", ctx.ID(), ctx.LocalAddr(), ctx.RemoteAddr(), ctx.Request.Header.Method(), ctx.URI().FullURI())
-}
-
-// ID returns unique ID of the request.
-func (ctx *RequestCtx) ID() uint64 {
- return (ctx.connID << 32) | ctx.connRequestNum
-}
-
-// ConnID returns unique connection ID.
-//
-// This ID may be used to match distinct requests to the same incoming
-// connection.
-func (ctx *RequestCtx) ConnID() uint64 {
- return ctx.connID
-}
-
-// Time returns RequestHandler call time truncated to the nearest second.
-//
-// Call time.Now() at the beginning of RequestHandler in order to obtain
-// precise RequestHandler call time.
-func (ctx *RequestCtx) Time() time.Time {
- return ctx.time
-}
-
-// ConnTime returns the time the server started serving the connection
-// the current request came from.
-//
-// The returned time is truncated to the nearest second.
-func (ctx *RequestCtx) ConnTime() time.Time {
- return ctx.connTime
-}
-
-// ConnRequestNum returns request sequence number
-// for the current connection.
-//
-// Sequence starts with 1.
-func (ctx *RequestCtx) ConnRequestNum() uint64 {
- return ctx.connRequestNum
-}
-
-// SetConnectionClose sets 'Connection: close' response header and closes
-// connection after the RequestHandler returns.
-func (ctx *RequestCtx) SetConnectionClose() {
- ctx.Response.SetConnectionClose()
-}
-
-// SetStatusCode sets response status code.
-func (ctx *RequestCtx) SetStatusCode(statusCode int) {
- ctx.Response.SetStatusCode(statusCode)
-}
-
-// SetContentType sets response Content-Type.
-func (ctx *RequestCtx) SetContentType(contentType string) {
- ctx.Response.Header.SetContentType(contentType)
-}
-
-// SetContentTypeBytes sets response Content-Type.
-//
-// It is safe to modify the contentType buffer after the function returns.
-func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) {
- ctx.Response.Header.SetContentTypeBytes(contentType)
-}
-
-// RequestURI returns RequestURI.
-//
-// This uri is valid until returning from RequestHandler.
-func (ctx *RequestCtx) RequestURI() []byte {
- return ctx.Request.Header.RequestURI()
-}
-
-// URI returns requested uri.
-//
-// The uri is valid until returning from RequestHandler.
-func (ctx *RequestCtx) URI() *URI {
- return ctx.Request.URI()
-}
-
-// Referer returns request referer.
-//
-// The referer is valid until returning from RequestHandler.
-func (ctx *RequestCtx) Referer() []byte {
- return ctx.Request.Header.Referer()
-}
-
-// UserAgent returns User-Agent header value from the request.
-func (ctx *RequestCtx) UserAgent() []byte {
- return ctx.Request.Header.UserAgent()
-}
-
-// Path returns requested path.
-//
-// The path is valid until returning from RequestHandler.
-func (ctx *RequestCtx) Path() []byte {
- return ctx.URI().Path()
-}
-
-// Host returns requested host.
-//
-// The host is valid until returning from RequestHandler.
-func (ctx *RequestCtx) Host() []byte {
- return ctx.URI().Host()
-}
-
-// QueryArgs returns query arguments from RequestURI.
-//
-// It doesn't return POST'ed arguments - use PostArgs() for this.
-//
-// Returned arguments are valid until returning from RequestHandler.
-//
-// See also PostArgs, FormValue and FormFile.
-func (ctx *RequestCtx) QueryArgs() *Args {
- return ctx.URI().QueryArgs()
-}
-
-// PostArgs returns POST arguments.
-//
-// It doesn't return query arguments from RequestURI - use QueryArgs for this.
-//
-// Returned arguments are valid until returning from RequestHandler.
-//
-// See also QueryArgs, FormValue and FormFile.
-func (ctx *RequestCtx) PostArgs() *Args {
- return ctx.Request.PostArgs()
-}
-
-// MultipartForm returns the request's multipart form.
-//
-// Returns ErrNoMultipartForm if request's content-type
-// isn't 'multipart/form-data'.
-//
-// All uploaded temporary files are automatically deleted after
-// returning from RequestHandler. Either move or copy uploaded files
-// into a new place if you want to retain them.
-//
-// Use SaveMultipartFile function for permanently saving uploaded file.
-//
-// The returned form is valid until returning from RequestHandler.
-//
-// See also FormFile and FormValue.
-func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) {
- return ctx.Request.MultipartForm()
-}
-
-// FormFile returns uploaded file associated with the given multipart form key.
-//
-// The file is automatically deleted after returning from RequestHandler,
-// so either move or copy the uploaded file into a new place if you want to retain it.
-//
-// Use SaveMultipartFile function for permanently saving uploaded file.
-//
-// The returned file header is valid until returning from RequestHandler.
-func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) {
- mf, err := ctx.MultipartForm()
- if err != nil {
- return nil, err
- }
- if mf.File == nil {
- return nil, err
- }
- fhh := mf.File[key]
- if fhh == nil {
- return nil, ErrMissingFile
- }
- return fhh[0], nil
-}
-
-// ErrMissingFile may be returned from FormFile when there is no uploaded file
-// associated with the given multipart form key.
-var ErrMissingFile = errors.New("there is no uploaded file associated with the given key")
-
-// SaveMultipartFile saves multipart file fh under the given filename path.
-func SaveMultipartFile(fh *multipart.FileHeader, path string) error {
- f, err := fh.Open()
- if err != nil {
- return err
- }
- defer f.Close()
-
- if ff, ok := f.(*os.File); ok {
- return os.Rename(ff.Name(), path)
- }
-
- ff, err := os.Create(path)
- if err != nil {
- return err
- }
- defer ff.Close()
- _, err = copyZeroAlloc(ff, f)
- return err
-}
-
-// FormValue returns form value associated with the given key.
-//
-// The value is searched in the following places:
-//
-// - Query string.
-// - POST or PUT body.
-//
-// There are more fine-grained methods for obtaining form values:
-//
-// - QueryArgs for obtaining values from query string.
-// - PostArgs for obtaining values from POST or PUT body.
-// - MultipartForm for obtaining values from multipart form.
-// - FormFile for obtaining uploaded files.
-//
-// The returned value is valid until returning from RequestHandler.
-func (ctx *RequestCtx) FormValue(key string) []byte {
- v := ctx.QueryArgs().Peek(key)
- if len(v) > 0 {
- return v
- }
- v = ctx.PostArgs().Peek(key)
- if len(v) > 0 {
- return v
- }
- mf, err := ctx.MultipartForm()
- if err == nil && mf.Value != nil {
- vv := mf.Value[key]
- if len(vv) > 0 {
- return []byte(vv[0])
- }
- }
- return nil
-}
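-
-// For example, for a request "POST /search?q=from-query" with the urlencoded
-// body "q=from-body", FormValue("q") returns "from-query", since the query
-// string is consulted before the POST body and the multipart form.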
-
-// IsGet returns true if request method is GET.
-func (ctx *RequestCtx) IsGet() bool {
- return ctx.Request.Header.IsGet()
-}
-
-// IsPost returns true if request method is POST.
-func (ctx *RequestCtx) IsPost() bool {
- return ctx.Request.Header.IsPost()
-}
-
-// IsPut returns true if request method is PUT.
-func (ctx *RequestCtx) IsPut() bool {
- return ctx.Request.Header.IsPut()
-}
-
-// IsDelete returns true if request method is DELETE.
-func (ctx *RequestCtx) IsDelete() bool {
- return ctx.Request.Header.IsDelete()
-}
-
-// Method returns the request method.
-//
-// Returned value is valid until returning from RequestHandler.
-func (ctx *RequestCtx) Method() []byte {
- return ctx.Request.Header.Method()
-}
-
-// IsHead returns true if request method is HEAD.
-func (ctx *RequestCtx) IsHead() bool {
- return ctx.Request.Header.IsHead()
-}
-
-// RemoteAddr returns client address for the given request.
-//
-// Always returns non-nil result.
-func (ctx *RequestCtx) RemoteAddr() net.Addr {
- if ctx.c == nil {
- return zeroTCPAddr
- }
- addr := ctx.c.RemoteAddr()
- if addr == nil {
- return zeroTCPAddr
- }
- return addr
-}
-
-// LocalAddr returns server address for the given request.
-//
-// Always returns non-nil result.
-func (ctx *RequestCtx) LocalAddr() net.Addr {
- if ctx.c == nil {
- return zeroTCPAddr
- }
- addr := ctx.c.LocalAddr()
- if addr == nil {
- return zeroTCPAddr
- }
- return addr
-}
-
-// RemoteIP returns the client ip the request came from.
-//
-// Always returns non-nil result.
-func (ctx *RequestCtx) RemoteIP() net.IP {
- return addrToIP(ctx.RemoteAddr())
-}
-
-// LocalIP returns the server ip the request came to.
-//
-// Always returns non-nil result.
-func (ctx *RequestCtx) LocalIP() net.IP {
- return addrToIP(ctx.LocalAddr())
-}
-
-func addrToIP(addr net.Addr) net.IP {
- x, ok := addr.(*net.TCPAddr)
- if !ok {
- return net.IPv4zero
- }
- return x.IP
-}
-
-// Error sets response status code to the given value and sets response body
-// to the given message.
-func (ctx *RequestCtx) Error(msg string, statusCode int) {
- ctx.Response.Reset()
- ctx.SetStatusCode(statusCode)
- ctx.SetContentTypeBytes(defaultContentType)
- ctx.SetBodyString(msg)
-}
-
-// Success sets response Content-Type and body to the given values.
-func (ctx *RequestCtx) Success(contentType string, body []byte) {
- ctx.SetContentType(contentType)
- ctx.SetBody(body)
-}
-
-// SuccessString sets response Content-Type and body to the given values.
-func (ctx *RequestCtx) SuccessString(contentType, body string) {
- ctx.SetContentType(contentType)
- ctx.SetBodyString(body)
-}
-
-// Redirect sets 'Location: uri' response header and sets the given statusCode.
-//
-// statusCode must have one of the following values:
-//
-// - StatusMovedPermanently (301)
-// - StatusFound (302)
-// - StatusSeeOther (303)
-// - StatusTemporaryRedirect (307)
-//
-// All other statusCode values are replaced by StatusFound (302).
-//
-// The redirect uri may be either absolute or relative to the current
-// request uri.
-func (ctx *RequestCtx) Redirect(uri string, statusCode int) {
- u := AcquireURI()
- ctx.URI().CopyTo(u)
- u.Update(uri)
- ctx.redirect(u.FullURI(), statusCode)
- ReleaseURI(u)
-}
-
-// RedirectBytes sets 'Location: uri' response header and sets
-// the given statusCode.
-//
-// statusCode must have one of the following values:
-//
-// - StatusMovedPermanently (301)
-// - StatusFound (302)
-// - StatusSeeOther (303)
-// - StatusTemporaryRedirect (307)
-//
-// All other statusCode values are replaced by StatusFound (302).
-//
-// The redirect uri may be either absolute or relative to the current
-// request uri.
-func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) {
- s := b2s(uri)
- ctx.Redirect(s, statusCode)
-}
-
-func (ctx *RequestCtx) redirect(uri []byte, statusCode int) {
- ctx.Response.Header.SetCanonical(strLocation, uri)
- statusCode = getRedirectStatusCode(statusCode)
- ctx.Response.SetStatusCode(statusCode)
-}
-
-func getRedirectStatusCode(statusCode int) int {
- if statusCode == StatusMovedPermanently || statusCode == StatusFound ||
- statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect {
- return statusCode
- }
- return StatusFound
-}
-
-// SetBody sets response body to the given value.
-//
-// It is safe to re-use the body argument after the function returns.
-func (ctx *RequestCtx) SetBody(body []byte) {
- ctx.Response.SetBody(body)
-}
-
-// SetBodyString sets response body to the given value.
-func (ctx *RequestCtx) SetBodyString(body string) {
- ctx.Response.SetBodyString(body)
-}
-
-// ResetBody resets response body contents.
-func (ctx *RequestCtx) ResetBody() {
- ctx.Response.ResetBody()
-}
-
-// SendFile sends local file contents from the given path as response body.
-//
-// This is a shortcut to ServeFile(ctx, path).
-//
-// SendFile logs all the errors via ctx.Logger.
-//
-// See also ServeFile, FSHandler and FS.
-func (ctx *RequestCtx) SendFile(path string) {
- ServeFile(ctx, path)
-}
-
-// SendFileBytes sends local file contents from the given path as response body.
-//
-// This is a shortcut to ServeFileBytes(ctx, path).
-//
-// SendFileBytes logs all the errors via ctx.Logger.
-//
-// See also ServeFileBytes, FSHandler and FS.
-func (ctx *RequestCtx) SendFileBytes(path []byte) {
- ServeFileBytes(ctx, path)
-}
-
-// IfModifiedSince returns true if lastModified exceeds 'If-Modified-Since'
-// value from the request header.
-//
-// The function also returns true if the 'If-Modified-Since' request header is missing.
-func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool {
- ifModStr := ctx.Request.Header.peek(strIfModifiedSince)
- if len(ifModStr) == 0 {
- return true
- }
- ifMod, err := ParseHTTPDate(ifModStr)
- if err != nil {
- return true
- }
- lastModified = lastModified.Truncate(time.Second)
- return ifMod.Before(lastModified)
-}
-
-// NotModified resets response and sets '304 Not Modified' response status code.
-func (ctx *RequestCtx) NotModified() {
- ctx.Response.Reset()
- ctx.SetStatusCode(StatusNotModified)
-}
-
-// NotFound resets response and sets '404 Not Found' response status code.
-func (ctx *RequestCtx) NotFound() {
- ctx.Response.Reset()
- ctx.SetStatusCode(StatusNotFound)
- ctx.SetBodyString("404 Page not found")
-}
-
-// Write writes p into response body.
-func (ctx *RequestCtx) Write(p []byte) (int, error) {
- ctx.Response.AppendBody(p)
- return len(p), nil
-}
-
-// WriteString appends s to response body.
-func (ctx *RequestCtx) WriteString(s string) (int, error) {
- ctx.Response.AppendBodyString(s)
- return len(s), nil
-}
-
-// PostBody returns POST request body.
-//
-// The returned value is valid until RequestHandler return.
-func (ctx *RequestCtx) PostBody() []byte {
- return ctx.Request.Body()
-}
-
-// SetBodyStream sets response body stream and, optionally, body size.
-//
-// bodyStream.Close() is called after finishing reading all body data
-// if it implements io.Closer.
-//
-// If bodySize is >= 0, then bodySize bytes must be provided by bodyStream
-// before returning io.EOF.
-//
-// If bodySize < 0, then bodyStream is read until io.EOF.
-//
-// See also SetBodyStreamWriter.
-func (ctx *RequestCtx) SetBodyStream(bodyStream io.Reader, bodySize int) {
- ctx.Response.SetBodyStream(bodyStream, bodySize)
-}
-
-// SetBodyStreamWriter registers the given stream writer for populating
-// response body.
-//
-// Access to RequestCtx and/or its members is forbidden from sw.
-//
-// This function may be used in the following cases:
-//
-// - if response body is too big (more than 10MB).
-// - if response body is streamed from slow external sources.
-// - if response body must be streamed to the client in chunks.
-// (aka `http server push`).
-func (ctx *RequestCtx) SetBodyStreamWriter(sw StreamWriter) {
- ctx.Response.SetBodyStreamWriter(sw)
-}
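-
-// A minimal usage sketch, streaming ten chunks to the client (the chunk
-// contents are placeholders):
-//
-//	ctx.SetBodyStreamWriter(func(w *bufio.Writer) {
-//		for i := 0; i < 10; i++ {
-//			fmt.Fprintf(w, "chunk %d\n", i)
-//			// Flush pushes the current chunk to the client immediately.
-//			if err := w.Flush(); err != nil {
-//				return
-//			}
-//		}
-//	})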
-
-// IsBodyStream returns true if response body is set via SetBodyStream*.
-func (ctx *RequestCtx) IsBodyStream() bool {
- return ctx.Response.IsBodyStream()
-}
-
-// Logger returns logger, which may be used for logging arbitrary
-// request-specific messages inside RequestHandler.
-//
-// Each message logged via returned logger contains request-specific information
-// such as request id, request duration, local address, remote address,
-// request method and request url.
-//
-// It is safe to re-use the returned logger for logging multiple messages
-// for the current request.
-//
-// The returned logger is valid until returning from RequestHandler.
-func (ctx *RequestCtx) Logger() Logger {
- if ctx.logger.ctx == nil {
- ctx.logger.ctx = ctx
- }
- if ctx.logger.logger == nil {
- ctx.logger.logger = ctx.s.logger()
- }
- return &ctx.logger
-}
-
-// TimeoutError sets response status code to StatusRequestTimeout and sets
-// body to the given msg.
-//
-// All response modifications after TimeoutError call are ignored.
-//
-// TimeoutError MUST be called before returning from RequestHandler if
-// references to ctx and/or its members remain in other goroutines.
-//
-// Usage of this function is discouraged. Prefer eliminating ctx references
-// from pending goroutines instead of using this function.
-func (ctx *RequestCtx) TimeoutError(msg string) {
- ctx.TimeoutErrorWithCode(msg, StatusRequestTimeout)
-}
-
-// TimeoutErrorWithCode sets response body to msg and response status
-// code to statusCode.
-//
-// All response modifications after TimeoutErrorWithCode call are ignored.
-//
-// TimeoutErrorWithCode MUST be called before returning from RequestHandler
-// if references to ctx and/or its members remain in other goroutines.
-//
-// Usage of this function is discouraged. Prefer eliminating ctx references
-// from pending goroutines instead of using this function.
-func (ctx *RequestCtx) TimeoutErrorWithCode(msg string, statusCode int) {
- var resp Response
- resp.SetStatusCode(statusCode)
- resp.SetBodyString(msg)
- ctx.TimeoutErrorWithResponse(&resp)
-}
-
-// TimeoutErrorWithResponse marks the ctx as timed out and sends the given
-// response to the client.
-//
-// All ctx modifications after TimeoutErrorWithResponse call are ignored.
-//
-// TimeoutErrorWithResponse MUST be called before returning from RequestHandler
-// if references to ctx and/or its members remain in other goroutines.
-//
-// Usage of this function is discouraged. Prefer eliminating ctx references
-// from pending goroutines instead of using this function.
-func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) {
- respCopy := &Response{}
- resp.CopyTo(respCopy)
- ctx.timeoutResponse = respCopy
-}
-
-// ListenAndServe serves HTTP requests from the given TCP4 addr.
-//
-// Pass a custom listener to Serve if you need to listen on non-TCP4 media
-// such as IPv6.
-func (s *Server) ListenAndServe(addr string) error {
- ln, err := net.Listen("tcp4", addr)
- if err != nil {
- return err
- }
- return s.Serve(ln)
-}
-
-// ListenAndServeUNIX serves HTTP requests from the given UNIX addr.
-//
-// The function deletes the existing file at addr before it starts serving.
-//
-// The server sets the given file mode for the UNIX addr.
-func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error {
- if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %s", addr, err)
- }
- ln, err := net.Listen("unix", addr)
- if err != nil {
- return err
- }
- if err = os.Chmod(addr, mode); err != nil {
- return fmt.Errorf("cannot chmod %#o for %q: %s", mode, addr, err)
- }
- return s.Serve(ln)
-}
-
-// ListenAndServeTLS serves HTTPS requests from the given TCP4 addr.
-//
-// certFile and keyFile are paths to TLS certificate and key files.
-//
-// Pass a custom listener to Serve if you need to listen on non-TCP4 media
-// such as IPv6.
-func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error {
- ln, err := net.Listen("tcp4", addr)
- if err != nil {
- return err
- }
- return s.ServeTLS(ln, certFile, keyFile)
-}
-
-// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP4 addr.
-//
-// certData and keyData must contain valid TLS certificate and key data.
-//
-// Pass a custom listener to Serve if you need to listen on arbitrary media
-// such as IPv6.
-func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte) error {
- ln, err := net.Listen("tcp4", addr)
- if err != nil {
- return err
- }
- return s.ServeTLSEmbed(ln, certData, keyData)
-}
-
-// ServeTLS serves HTTPS requests from the given listener.
-//
-// certFile and keyFile are paths to TLS certificate and key files.
-func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error {
- lnTLS, err := newTLSListener(ln, certFile, keyFile)
- if err != nil {
- return err
- }
- return s.Serve(lnTLS)
-}
-
-// ServeTLSEmbed serves HTTPS requests from the given listener.
-//
-// certData and keyData must contain valid TLS certificate and key data.
-func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error {
- lnTLS, err := newTLSListenerEmbed(ln, certData, keyData)
- if err != nil {
- return err
- }
- return s.Serve(lnTLS)
-}
-
-func newTLSListener(ln net.Listener, certFile, keyFile string) (net.Listener, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err)
- }
- return newCertListener(ln, &cert), nil
-}
-
-func newTLSListenerEmbed(ln net.Listener, certData, keyData []byte) (net.Listener, error) {
- cert, err := tls.X509KeyPair(certData, keyData)
- if err != nil {
- return nil, fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s",
- len(certData), len(keyData), err)
- }
- return newCertListener(ln, &cert), nil
-}
-
-func newCertListener(ln net.Listener, cert *tls.Certificate) net.Listener {
- tlsConfig := &tls.Config{
- Certificates: []tls.Certificate{*cert},
- PreferServerCipherSuites: true,
- }
- return tls.NewListener(ln, tlsConfig)
-}
-
-// DefaultConcurrency is the maximum number of concurrent connections
-// the Server may serve by default (i.e. if Server.Concurrency isn't set).
-const DefaultConcurrency = 256 * 1024
-
-// Serve serves incoming connections from the given listener.
-//
-// Serve blocks until the given listener returns a permanent error.
-func (s *Server) Serve(ln net.Listener) error {
- var lastOverflowErrorTime time.Time
- var lastPerIPErrorTime time.Time
- var c net.Conn
- var err error
-
- maxWorkersCount := s.getConcurrency()
- s.concurrencyCh = make(chan struct{}, maxWorkersCount)
- wp := &workerPool{
- WorkerFunc: s.serveConn,
- MaxWorkersCount: maxWorkersCount,
- LogAllErrors: s.LogAllErrors,
- Logger: s.logger(),
- }
- wp.Start()
-
- for {
- if c, err = acceptConn(s, ln, &lastPerIPErrorTime); err != nil {
- wp.Stop()
- if err == io.EOF {
- return nil
- }
- return err
- }
- if !wp.Serve(c) {
- s.writeFastError(c, StatusServiceUnavailable,
- "The connection cannot be served because Server.Concurrency limit exceeded")
- c.Close()
- if time.Since(lastOverflowErrorTime) > time.Minute {
- s.logger().Printf("The incoming connection cannot be served, because %d concurrent connections are served. "+
- "Try increasing Server.Concurrency", maxWorkersCount)
- lastOverflowErrorTime = time.Now()
- }
-
- // The current server has reached its concurrency limit,
- // so give other concurrently running servers a chance
- // to accept incoming connections on the same address.
- //
- // Hopefully the other servers haven't reached their
- // concurrency limits yet :)
- time.Sleep(100 * time.Millisecond)
- }
- c = nil
- }
-}
-
-func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) {
- for {
- c, err := ln.Accept()
- if err != nil {
- if c != nil {
- panic("BUG: net.Listener returned non-nil conn and non-nil error")
- }
- if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
- s.logger().Printf("Temporary error when accepting new connections: %s", netErr)
- time.Sleep(time.Second)
- continue
- }
- if err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") {
- s.logger().Printf("Permanent error when accepting new connections: %s", err)
- return nil, err
- }
- return nil, io.EOF
- }
- if c == nil {
- panic("BUG: net.Listener returned (nil, nil)")
- }
- if s.MaxConnsPerIP > 0 {
- pic := wrapPerIPConn(s, c)
- if pic == nil {
- if time.Since(*lastPerIPErrorTime) > time.Minute {
- s.logger().Printf("The number of connections from %s exceeds MaxConnsPerIP=%d",
- getConnIP4(c), s.MaxConnsPerIP)
- *lastPerIPErrorTime = time.Now()
- }
- continue
- }
- c = pic
- }
- return c, nil
- }
-}
-
-func wrapPerIPConn(s *Server, c net.Conn) net.Conn {
- ip := getUint32IP(c)
- if ip == 0 {
- return c
- }
- n := s.perIPConnCounter.Register(ip)
- if n > s.MaxConnsPerIP {
- s.perIPConnCounter.Unregister(ip)
- s.writeFastError(c, StatusTooManyRequests, "The number of connections from your ip exceeds MaxConnsPerIP")
- c.Close()
- return nil
- }
- return acquirePerIPConn(c, ip, &s.perIPConnCounter)
-}
-
-var defaultLogger = Logger(log.New(os.Stderr, "", log.LstdFlags))
-
-func (s *Server) logger() Logger {
- if s.Logger != nil {
- return s.Logger
- }
- return defaultLogger
-}
-
-var (
- // ErrPerIPConnLimit may be returned from ServeConn if the number of connections
- // per ip exceeds Server.MaxConnsPerIP.
- ErrPerIPConnLimit = errors.New("too many connections per ip")
-
- // ErrConcurrencyLimit may be returned from ServeConn if the number
- // of concurrently served connections exceeds Server.Concurrency.
- ErrConcurrencyLimit = errors.New("cannot serve the connection because Server.Concurrency concurrent connections are served")
-
- // ErrKeepaliveTimeout is returned from ServeConn
- // if the connection lifetime exceeds MaxKeepaliveDuration.
- ErrKeepaliveTimeout = errors.New("exceeded MaxKeepaliveDuration")
-)
-
-// ServeConn serves HTTP requests from the given connection.
-//
-// ServeConn returns nil if all requests from the c are successfully served.
-// It returns non-nil error otherwise.
-//
-// Connection c must immediately propagate all the data passed to Write()
-// to the client. Otherwise request processing may hang.
-//
-// ServeConn closes c before returning.
-func (s *Server) ServeConn(c net.Conn) error {
- if s.MaxConnsPerIP > 0 {
- pic := wrapPerIPConn(s, c)
- if pic == nil {
- return ErrPerIPConnLimit
- }
- c = pic
- }
-
- n := atomic.AddUint32(&s.concurrency, 1)
- if n > uint32(s.getConcurrency()) {
- atomic.AddUint32(&s.concurrency, ^uint32(0))
- s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded")
- c.Close()
- return ErrConcurrencyLimit
- }
-
- err := s.serveConn(c)
-
- atomic.AddUint32(&s.concurrency, ^uint32(0))
-
- if err != errHijacked {
- err1 := c.Close()
- if err == nil {
- err = err1
- }
- } else {
- err = nil
- }
- return err
-}
-
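`ServeConn` lets callers bring their own accept loop instead of using `Serve`. A hedged sketch of that pattern (listener address and handler are illustrative; `ServeConn` closes the connection itself, as documented above):

```go
package main

import (
	"log"
	"net"

	"github.com/VictoriaMetrics/fasthttp"
)

func main() {
	s := &fasthttp.Server{
		Handler: func(ctx *fasthttp.RequestCtx) { ctx.WriteString("ok") },
	}
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	for {
		c, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		// ServeConn closes c before returning, so no explicit Close is needed.
		go func() {
			if err := s.ServeConn(c); err != nil {
				log.Printf("error serving %s: %s", c.RemoteAddr(), err)
			}
		}()
	}
}
```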
-var errHijacked = errors.New("connection has been hijacked")
-
-func (s *Server) getConcurrency() int {
- n := s.Concurrency
- if n <= 0 {
- n = DefaultConcurrency
- }
- return n
-}
-
-var globalConnID uint64
-
-func nextConnID() uint64 {
- return atomic.AddUint64(&globalConnID, 1)
-}
-
-// DefaultMaxRequestBodySize is the maximum request body size the server
-// reads by default.
-//
-// See Server.MaxRequestBodySize for details.
-const DefaultMaxRequestBodySize = 4 * 1024 * 1024
-
-func (s *Server) serveConn(c net.Conn) error {
- serverName := s.getServerName()
- connRequestNum := uint64(0)
- connID := nextConnID()
- currentTime := time.Now()
- connTime := currentTime
- maxRequestBodySize := s.MaxRequestBodySize
- if maxRequestBodySize <= 0 {
- maxRequestBodySize = DefaultMaxRequestBodySize
- }
-
- ctx := s.acquireCtx(c)
- ctx.connTime = connTime
- isTLS := ctx.IsTLS()
- var (
- br *bufio.Reader
- bw *bufio.Writer
-
- err error
- timeoutResponse *Response
- hijackHandler HijackHandler
-
- lastReadDeadlineTime time.Time
- lastWriteDeadlineTime time.Time
-
- connectionClose bool
- isHTTP11 bool
- )
- for {
- connRequestNum++
- ctx.time = currentTime
-
- if s.ReadTimeout > 0 || s.MaxKeepaliveDuration > 0 {
- lastReadDeadlineTime = s.updateReadDeadline(c, ctx, lastReadDeadlineTime)
- if lastReadDeadlineTime.IsZero() {
- err = ErrKeepaliveTimeout
- break
- }
- }
-
- if !(s.ReduceMemoryUsage || ctx.lastReadDuration > time.Second) || br != nil {
- if br == nil {
- br = acquireReader(ctx)
- }
- } else {
- br, err = acquireByteReader(&ctx)
- }
- ctx.Request.isTLS = isTLS
-
- if err == nil {
- err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly)
- if br.Buffered() == 0 || err != nil {
- releaseReader(s, br)
- br = nil
- }
- }
-
- currentTime = time.Now()
- ctx.lastReadDuration = currentTime.Sub(ctx.time)
-
- if err != nil {
- if err == io.EOF {
- err = nil
- } else {
- bw = writeErrorResponse(bw, ctx, err)
- }
- break
- }
-
- // 'Expect: 100-continue' request handling.
- // See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details.
- if !ctx.Request.Header.noBody() && ctx.Request.MayContinue() {
- // Send 'HTTP/1.1 100 Continue' response.
- if bw == nil {
- bw = acquireWriter(ctx)
- }
- bw.Write(strResponseContinue)
- err = bw.Flush()
- releaseWriter(s, bw)
- bw = nil
- if err != nil {
- break
- }
-
- // Read request body.
- if br == nil {
- br = acquireReader(ctx)
- }
- err = ctx.Request.ContinueReadBody(br, maxRequestBodySize)
- if br.Buffered() == 0 || err != nil {
- releaseReader(s, br)
- br = nil
- }
- if err != nil {
- bw = writeErrorResponse(bw, ctx, err)
- break
- }
- }
-
- connectionClose = s.DisableKeepalive || ctx.Request.Header.connectionCloseFast()
- isHTTP11 = ctx.Request.Header.IsHTTP11()
-
- ctx.Response.Header.SetServerBytes(serverName)
- ctx.connID = connID
- ctx.connRequestNum = connRequestNum
- ctx.connTime = connTime
- ctx.time = currentTime
- s.Handler(ctx)
-
- timeoutResponse = ctx.timeoutResponse
- if timeoutResponse != nil {
- ctx = s.acquireCtx(c)
- timeoutResponse.CopyTo(&ctx.Response)
- if br != nil {
- // Close connection, since br may be attached to the old ctx via ctx.fbr.
- ctx.SetConnectionClose()
- }
- }
-
- if !ctx.IsGet() && ctx.IsHead() {
- ctx.Response.SkipBody = true
- }
- ctx.Request.Reset()
-
- hijackHandler = ctx.hijackHandler
- ctx.hijackHandler = nil
-
- ctx.userValues.Reset()
-
- if s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn) {
- ctx.SetConnectionClose()
- }
-
- if s.WriteTimeout > 0 || s.MaxKeepaliveDuration > 0 {
- lastWriteDeadlineTime = s.updateWriteDeadline(c, ctx, lastWriteDeadlineTime)
- }
-
- // Verify Request.Header.connectionCloseFast() again,
- // since request handler might trigger full headers' parsing.
- connectionClose = connectionClose || ctx.Request.Header.connectionCloseFast() || ctx.Response.ConnectionClose()
- if connectionClose {
- ctx.Response.Header.SetCanonical(strConnection, strClose)
- } else if !isHTTP11 {
- // Set 'Connection: keep-alive' response header for non-HTTP/1.1 request.
- // There is no need in setting this header for http/1.1, since in http/1.1
- // connections are keep-alive by default.
- ctx.Response.Header.SetCanonical(strConnection, strKeepAlive)
- }
-
- if len(ctx.Response.Header.Server()) == 0 {
- ctx.Response.Header.SetServerBytes(serverName)
- }
-
- if bw == nil {
- bw = acquireWriter(ctx)
- }
- if err = writeResponse(ctx, bw); err != nil {
- break
- }
-
- if br == nil || connectionClose {
- err = bw.Flush()
- releaseWriter(s, bw)
- bw = nil
- if err != nil {
- break
- }
- if connectionClose {
- break
- }
- }
-
- if hijackHandler != nil {
- var hjr io.Reader
- hjr = c
- if br != nil {
- hjr = br
- br = nil
-
- // br may point to ctx.fbr, so do not return ctx into pool.
- ctx = s.acquireCtx(c)
- }
- if bw != nil {
- err = bw.Flush()
- releaseWriter(s, bw)
- bw = nil
- if err != nil {
- break
- }
- }
- c.SetReadDeadline(zeroTime)
- c.SetWriteDeadline(zeroTime)
- go hijackConnHandler(hjr, c, s, hijackHandler)
- hijackHandler = nil
- err = errHijacked
- break
- }
-
- currentTime = time.Now()
- }
-
- if br != nil {
- releaseReader(s, br)
- }
- if bw != nil {
- releaseWriter(s, bw)
- }
- s.releaseCtx(ctx)
- return err
-}
-
-func (s *Server) updateReadDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
- readTimeout := s.ReadTimeout
- currentTime := ctx.time
- if s.MaxKeepaliveDuration > 0 {
- connTimeout := s.MaxKeepaliveDuration - currentTime.Sub(ctx.connTime)
- if connTimeout <= 0 {
- return zeroTime
- }
- if connTimeout < readTimeout {
- readTimeout = connTimeout
- }
- }
-
- // Optimization: update the read deadline only if more than 25%
- // of the last read deadline has passed.
- // See https://github.com/golang/go/issues/15133 for details.
- if currentTime.Sub(lastDeadlineTime) > (readTimeout >> 2) {
- if err := c.SetReadDeadline(currentTime.Add(readTimeout)); err != nil {
- panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", readTimeout, err))
- }
- lastDeadlineTime = currentTime
- }
- return lastDeadlineTime
-}
-
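The 25% rule above exists because `SetReadDeadline`/`SetWriteDeadline` are comparatively expensive syscalls (see the Go issue linked in the comments), so the deadline is refreshed only once a quarter of the timeout has elapsed. A standalone sketch of the same idea (function name is ours):

```go
package deadlines

import (
	"net"
	"time"
)

// refreshCoarsely updates c's read deadline only when at least 25% of
// timeout has elapsed since the previous refresh, trading a little
// deadline precision for far fewer SetReadDeadline calls.
func refreshCoarsely(c net.Conn, timeout time.Duration, last time.Time) time.Time {
	now := time.Now()
	if now.Sub(last) > timeout>>2 {
		if err := c.SetReadDeadline(now.Add(timeout)); err != nil {
			panic(err) // a deadline error here indicates a programming bug
		}
		return now
	}
	return last
}
```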
-func (s *Server) updateWriteDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
- writeTimeout := s.WriteTimeout
- if s.MaxKeepaliveDuration > 0 {
- connTimeout := s.MaxKeepaliveDuration - time.Since(ctx.connTime)
- if connTimeout <= 0 {
- // MaxKeepAliveDuration exceeded, but let's try sending response anyway
- // in 100ms with 'Connection: close' header.
- ctx.SetConnectionClose()
- connTimeout = 100 * time.Millisecond
- }
- if connTimeout < writeTimeout {
- writeTimeout = connTimeout
- }
- }
-
- // Optimization: update the write deadline only if more than 25%
- // of the last write deadline has passed.
- // See https://github.com/golang/go/issues/15133 for details.
- currentTime := time.Now()
- if currentTime.Sub(lastDeadlineTime) > (writeTimeout >> 2) {
- if err := c.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
- panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", writeTimeout, err))
- }
- lastDeadlineTime = currentTime
- }
- return lastDeadlineTime
-}
-
-func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) {
- hjc := s.acquireHijackConn(r, c)
- h(hjc)
-
- if br, ok := r.(*bufio.Reader); ok {
- releaseReader(s, br)
- }
- c.Close()
- s.releaseHijackConn(hjc)
-}
-
-func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn {
- v := s.hijackConnPool.Get()
- if v == nil {
- hjc := &hijackConn{
- Conn: c,
- r: r,
- }
- return hjc
- }
- hjc := v.(*hijackConn)
- hjc.Conn = c
- hjc.r = r
- return hjc
-}
-
-func (s *Server) releaseHijackConn(hjc *hijackConn) {
- hjc.Conn = nil
- hjc.r = nil
- s.hijackConnPool.Put(hjc)
-}
-
-type hijackConn struct {
- net.Conn
- r io.Reader
-}
-
-func (c hijackConn) Read(p []byte) (int, error) {
- return c.r.Read(p)
-}
-
-func (c hijackConn) Close() error {
- // hijacked conn is closed in hijackConnHandler.
- return nil
-}
-
-// LastTimeoutErrorResponse returns the last timeout response set
-// via TimeoutError* call.
-//
-// This function is intended for custom server implementations.
-func (ctx *RequestCtx) LastTimeoutErrorResponse() *Response {
- return ctx.timeoutResponse
-}
-
-func writeResponse(ctx *RequestCtx, w *bufio.Writer) error {
- if ctx.timeoutResponse != nil {
- panic("BUG: cannot write timed out response")
- }
- err := ctx.Response.Write(w)
- ctx.Response.Reset()
- return err
-}
-
-const (
- defaultReadBufferSize = 4096
- defaultWriteBufferSize = 4096
-)
-
-func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) {
- ctx := *ctxP
- s := ctx.s
- c := ctx.c
- t := ctx.time
- s.releaseCtx(ctx)
-
- // Make GC happy, so it could garbage collect ctx
- // while we are waiting for the next request.
- ctx = nil
- *ctxP = nil
-
- v := s.bytePool.Get()
- if v == nil {
- v = make([]byte, 1)
- }
- b := v.([]byte)
- n, err := c.Read(b)
- ch := b[0]
- s.bytePool.Put(v)
- ctx = s.acquireCtx(c)
- ctx.time = t
- *ctxP = ctx
- if err != nil {
- // Treat all errors as EOF on unsuccessful read
- // of the first request byte.
- return nil, io.EOF
- }
- if n != 1 {
- panic("BUG: Reader must return at least one byte")
- }
-
- ctx.fbr.c = c
- ctx.fbr.ch = ch
- ctx.fbr.byteRead = false
- r := acquireReader(ctx)
- r.Reset(&ctx.fbr)
- return r, nil
-}
-
-func acquireReader(ctx *RequestCtx) *bufio.Reader {
- v := ctx.s.readerPool.Get()
- if v == nil {
- n := ctx.s.ReadBufferSize
- if n <= 0 {
- n = defaultReadBufferSize
- }
- return bufio.NewReaderSize(ctx.c, n)
- }
- r := v.(*bufio.Reader)
- r.Reset(ctx.c)
- return r
-}
-
-func releaseReader(s *Server, r *bufio.Reader) {
- s.readerPool.Put(r)
-}
-
-func acquireWriter(ctx *RequestCtx) *bufio.Writer {
- v := ctx.s.writerPool.Get()
- if v == nil {
- n := ctx.s.WriteBufferSize
- if n <= 0 {
- n = defaultWriteBufferSize
- }
- return bufio.NewWriterSize(ctx.c, n)
- }
- w := v.(*bufio.Writer)
- w.Reset(ctx.c)
- return w
-}
-
-func releaseWriter(s *Server, w *bufio.Writer) {
- s.writerPool.Put(w)
-}
-
-func (s *Server) acquireCtx(c net.Conn) *RequestCtx {
- v := s.ctxPool.Get()
- var ctx *RequestCtx
- if v == nil {
- ctx = &RequestCtx{
- s: s,
- }
- keepBodyBuffer := !s.ReduceMemoryUsage
- ctx.Request.keepBodyBuffer = keepBodyBuffer
- ctx.Response.keepBodyBuffer = keepBodyBuffer
- } else {
- ctx = v.(*RequestCtx)
- }
- ctx.c = c
- return ctx
-}
-
-// Init2 prepares ctx for passing to RequestHandler.
-//
-// conn is used only for determining local and remote addresses.
-//
-// This function is intended for custom Server implementations.
-// See https://github.com/valyala/httpteleport for details.
-func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage bool) {
- ctx.c = conn
- ctx.logger.logger = logger
- ctx.connID = nextConnID()
- ctx.s = fakeServer
- ctx.connRequestNum = 0
- ctx.connTime = time.Now()
- ctx.time = ctx.connTime
-
- keepBodyBuffer := !reduceMemoryUsage
- ctx.Request.keepBodyBuffer = keepBodyBuffer
- ctx.Response.keepBodyBuffer = keepBodyBuffer
-}
-
-// Init prepares ctx for passing to RequestHandler.
-//
-// remoteAddr and logger are optional. They are used by RequestCtx.Logger().
-//
-// This function is intended for custom Server implementations.
-func (ctx *RequestCtx) Init(req *Request, remoteAddr net.Addr, logger Logger) {
- if remoteAddr == nil {
- remoteAddr = zeroTCPAddr
- }
- c := &fakeAddrer{
- laddr: zeroTCPAddr,
- raddr: remoteAddr,
- }
- if logger == nil {
- logger = defaultLogger
- }
- ctx.Init2(c, logger, true)
- req.CopyTo(&ctx.Request)
-}
-
-var fakeServer = &Server{
- // Initialize concurrencyCh for TimeoutHandler
- concurrencyCh: make(chan struct{}, DefaultConcurrency),
-}
-
-type fakeAddrer struct {
- net.Conn
- laddr net.Addr
- raddr net.Addr
-}
-
-func (fa *fakeAddrer) RemoteAddr() net.Addr {
- return fa.raddr
-}
-
-func (fa *fakeAddrer) LocalAddr() net.Addr {
- return fa.laddr
-}
-
-func (fa *fakeAddrer) Read(p []byte) (int, error) {
- panic("BUG: unexpected Read call")
-}
-
-func (fa *fakeAddrer) Write(p []byte) (int, error) {
- panic("BUG: unexpected Write call")
-}
-
-func (fa *fakeAddrer) Close() error {
- panic("BUG: unexpected Close call")
-}
-
-func (s *Server) releaseCtx(ctx *RequestCtx) {
- if ctx.timeoutResponse != nil {
- panic("BUG: cannot release timed out RequestCtx")
- }
- ctx.c = nil
- ctx.fbr.c = nil
- s.ctxPool.Put(ctx)
-}
-
-func (s *Server) getServerName() []byte {
- v := s.serverName.Load()
- var serverName []byte
- if v == nil {
- serverName = []byte(s.Name)
- if len(serverName) == 0 {
- serverName = defaultServerName
- }
- s.serverName.Store(serverName)
- } else {
- serverName = v.([]byte)
- }
- return serverName
-}
-
-func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) {
- w.Write(statusLine(statusCode))
- fmt.Fprintf(w, "Connection: close\r\n"+
- "Server: %s\r\n"+
- "Date: %s\r\n"+
- "Content-Type: text/plain\r\n"+
- "Content-Length: %d\r\n"+
- "\r\n"+
- "%s",
- s.getServerName(), serverDate.Load(), len(msg), msg)
-}
-
-func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, err error) *bufio.Writer {
- if _, ok := err.(*ErrSmallBuffer); ok {
- ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge)
- } else {
- ctx.Error("Error when parsing request", StatusBadRequest)
- }
- ctx.SetConnectionClose()
- if bw == nil {
- bw = acquireWriter(ctx)
- }
- writeResponse(ctx, bw)
- bw.Flush()
- return bw
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.key b/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.key
deleted file mode 100644
index 00a79a3b5..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG
-3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U
-wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0
-FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf
-IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg
-GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF
-sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2
-sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D
-uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb
-K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3
-YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+
-DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk
-B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV
-Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x
-IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY
-wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj
-wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D
-FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m
-tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX
-fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU
-ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk
-K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT
-6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt
-9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN
-Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV
-c257YgaWmjK9uB0Y2r2VxS0G
------END PRIVATE KEY-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.pem b/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.pem
deleted file mode 100644
index 93e77cd95..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/ssl-cert-snakeoil.pem
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV
-BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV
-MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D
-K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te
-+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij
-L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1
-xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY
-6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98
-L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2
-45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li
-K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6
-X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI
-whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd
------END CERTIFICATE-----
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/doc.go b/vendor/github.com/VictoriaMetrics/fasthttp/stackless/doc.go
deleted file mode 100644
index 8c0cc497c..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package stackless provides functionality that may save stack space
-// for a high number of concurrently running goroutines.
-package stackless
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/func.go b/vendor/github.com/VictoriaMetrics/fasthttp/stackless/func.go
deleted file mode 100644
index 9a49bcc26..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/func.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package stackless
-
-import (
- "runtime"
- "sync"
-)
-
-// NewFunc returns stackless wrapper for the function f.
-//
-// Unlike f, the returned stackless wrapper doesn't use stack space
-// on the goroutine that calls it.
-// The wrapper may save a lot of stack space if the following conditions
-// are met:
-//
-// - f doesn't contain blocking calls on network, I/O or channels;
-// - f uses a lot of stack space;
-// - the wrapper is called from a high number of concurrent goroutines.
-//
-// The stackless wrapper returns false if the call cannot be processed
-// at the moment due to high load.
-func NewFunc(f func(ctx interface{})) func(ctx interface{}) bool {
- if f == nil {
- panic("BUG: f cannot be nil")
- }
-
- funcWorkCh := make(chan *funcWork, runtime.GOMAXPROCS(-1)*2048)
- onceInit := func() {
- n := runtime.GOMAXPROCS(-1)
- for i := 0; i < n; i++ {
- go funcWorker(funcWorkCh, f)
- }
- }
- var once sync.Once
-
- return func(ctx interface{}) bool {
- once.Do(onceInit)
- fw := getFuncWork()
- fw.ctx = ctx
-
- select {
- case funcWorkCh <- fw:
- default:
- putFuncWork(fw)
- return false
- }
- <-fw.done
- putFuncWork(fw)
- return true
- }
-}
-
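A small usage sketch for `NewFunc`: the wrapped closure receives its arguments through the opaque `ctx` value and runs on one of the `GOMAXPROCS` worker goroutines started above, and the wrapper reports `false` when the work queue is full (the `job` type is ours):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fasthttp/stackless"
)

type job struct {
	in, out int
}

// square runs on a dedicated worker goroutine, so any stack growth it
// causes doesn't affect the calling goroutine's stack.
var square = stackless.NewFunc(func(ctx interface{}) {
	j := ctx.(*job)
	j.out = j.in * j.in
})

func main() {
	j := &job{in: 7}
	if !square(j) {
		fmt.Println("dropped: too many concurrent calls")
		return
	}
	fmt.Println(j.out) // 49
}
```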
-func funcWorker(funcWorkCh <-chan *funcWork, f func(ctx interface{})) {
- for fw := range funcWorkCh {
- f(fw.ctx)
- fw.done <- struct{}{}
- }
-}
-
-func getFuncWork() *funcWork {
- v := funcWorkPool.Get()
- if v == nil {
- v = &funcWork{
- done: make(chan struct{}, 1),
- }
- }
- return v.(*funcWork)
-}
-
-func putFuncWork(fw *funcWork) {
- fw.ctx = nil
- funcWorkPool.Put(fw)
-}
-
-var funcWorkPool sync.Pool
-
-type funcWork struct {
- ctx interface{}
- done chan struct{}
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/writer.go b/vendor/github.com/VictoriaMetrics/fasthttp/stackless/writer.go
deleted file mode 100644
index 9b9ff09d5..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/stackless/writer.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package stackless
-
-import (
- "errors"
- "fmt"
- "github.com/valyala/bytebufferpool"
- "io"
-)
-
-// Writer is an interface stackless writer must conform to.
-//
-// The interface contains common subset for Writers from compress/* packages.
-type Writer interface {
- Write(p []byte) (int, error)
- Flush() error
- Close() error
- Reset(w io.Writer)
-}
-
-// NewWriterFunc must return new writer that will be wrapped into
-// stackless writer.
-type NewWriterFunc func(w io.Writer) Writer
-
-// NewWriter creates a stackless writer around a writer returned
-// from newWriter.
-//
-// The returned writer writes data to dstW.
-//
-// Writers that use a lot of stack space may be wrapped into a stackless writer,
-// thus saving stack space for a high number of concurrently running goroutines.
-func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer {
- w := &writer{
- dstW: dstW,
- }
- w.zw = newWriter(&w.xw)
- return w
-}
-
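`compress/gzip`'s writer happens to satisfy the `Writer` interface above (it has `Write`, `Flush`, `Close` and `Reset(io.Writer)`), so a hedged usage sketch looks like this:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/VictoriaMetrics/fasthttp/stackless"
)

func main() {
	var buf bytes.Buffer
	// The gzip writer runs on a dedicated worker goroutine; its output
	// is copied into buf after each operation completes.
	w := stackless.NewWriter(&buf, func(dst io.Writer) stackless.Writer {
		return gzip.NewWriter(dst)
	})
	if _, err := w.Write([]byte("hello stackless world")); err != nil {
		panic(err) // may be the "high load" error under extreme concurrency
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d compressed bytes\n", buf.Len())
}
```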
-type writer struct {
- dstW io.Writer
- zw Writer
- xw xWriter
-
- err error
- n int
-
- p []byte
- op op
-}
-
-type op int
-
-const (
- opWrite op = iota
- opFlush
- opClose
- opReset
-)
-
-func (w *writer) Write(p []byte) (int, error) {
- w.p = p
- err := w.do(opWrite)
- w.p = nil
- return w.n, err
-}
-
-func (w *writer) Flush() error {
- return w.do(opFlush)
-}
-
-func (w *writer) Close() error {
- return w.do(opClose)
-}
-
-func (w *writer) Reset(dstW io.Writer) {
- w.xw.Reset()
- w.do(opReset)
- w.dstW = dstW
-}
-
-func (w *writer) do(op op) error {
- w.op = op
- if !stacklessWriterFunc(w) {
- return errHighLoad
- }
- err := w.err
- if err != nil {
- return err
- }
- if w.xw.bb != nil && len(w.xw.bb.B) > 0 {
- _, err = w.dstW.Write(w.xw.bb.B)
- }
- w.xw.Reset()
-
- return err
-}
-
-var errHighLoad = errors.New("cannot compress data due to high load")
-
-var stacklessWriterFunc = NewFunc(writerFunc)
-
-func writerFunc(ctx interface{}) {
- w := ctx.(*writer)
- switch w.op {
- case opWrite:
- w.n, w.err = w.zw.Write(w.p)
- case opFlush:
- w.err = w.zw.Flush()
- case opClose:
- w.err = w.zw.Close()
- case opReset:
- w.zw.Reset(&w.xw)
- w.err = nil
- default:
- panic(fmt.Sprintf("BUG: unexpected op: %d", w.op))
- }
-}
-
-type xWriter struct {
- bb *bytebufferpool.ByteBuffer
-}
-
-func (w *xWriter) Write(p []byte) (int, error) {
- if w.bb == nil {
- w.bb = bufferPool.Get()
- }
- w.bb.Write(p)
- return len(p), nil
-}
-
-func (w *xWriter) Reset() {
- if w.bb != nil {
- bufferPool.Put(w.bb)
- w.bb = nil
- }
-}
-
-var bufferPool bytebufferpool.Pool
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/status.go b/vendor/github.com/VictoriaMetrics/fasthttp/status.go
deleted file mode 100644
index 6687efb42..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/status.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package fasthttp
-
-import (
- "fmt"
- "sync/atomic"
-)
-
-// HTTP status codes were stolen from net/http.
-const (
- StatusContinue = 100 // RFC 7231, 6.2.1
- StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2
- StatusProcessing = 102 // RFC 2518, 10.1
-
- StatusOK = 200 // RFC 7231, 6.3.1
- StatusCreated = 201 // RFC 7231, 6.3.2
- StatusAccepted = 202 // RFC 7231, 6.3.3
- StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4
- StatusNoContent = 204 // RFC 7231, 6.3.5
- StatusResetContent = 205 // RFC 7231, 6.3.6
- StatusPartialContent = 206 // RFC 7233, 4.1
- StatusMultiStatus = 207 // RFC 4918, 11.1
- StatusAlreadyReported = 208 // RFC 5842, 7.1
- StatusIMUsed = 226 // RFC 3229, 10.4.1
-
- StatusMultipleChoices = 300 // RFC 7231, 6.4.1
- StatusMovedPermanently = 301 // RFC 7231, 6.4.2
- StatusFound = 302 // RFC 7231, 6.4.3
- StatusSeeOther = 303 // RFC 7231, 6.4.4
- StatusNotModified = 304 // RFC 7232, 4.1
- StatusUseProxy = 305 // RFC 7231, 6.4.5
- _ = 306 // RFC 7231, 6.4.6 (Unused)
- StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7
- StatusPermanentRedirect = 308 // RFC 7538, 3
-
- StatusBadRequest = 400 // RFC 7231, 6.5.1
- StatusUnauthorized = 401 // RFC 7235, 3.1
- StatusPaymentRequired = 402 // RFC 7231, 6.5.2
- StatusForbidden = 403 // RFC 7231, 6.5.3
- StatusNotFound = 404 // RFC 7231, 6.5.4
- StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5
- StatusNotAcceptable = 406 // RFC 7231, 6.5.6
- StatusProxyAuthRequired = 407 // RFC 7235, 3.2
- StatusRequestTimeout = 408 // RFC 7231, 6.5.7
- StatusConflict = 409 // RFC 7231, 6.5.8
- StatusGone = 410 // RFC 7231, 6.5.9
- StatusLengthRequired = 411 // RFC 7231, 6.5.10
- StatusPreconditionFailed = 412 // RFC 7232, 4.2
- StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11
- StatusRequestURITooLong = 414 // RFC 7231, 6.5.12
- StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13
- StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4
- StatusExpectationFailed = 417 // RFC 7231, 6.5.14
- StatusTeapot = 418 // RFC 7168, 2.3.3
- StatusUnprocessableEntity = 422 // RFC 4918, 11.2
- StatusLocked = 423 // RFC 4918, 11.3
- StatusFailedDependency = 424 // RFC 4918, 11.4
- StatusUpgradeRequired = 426 // RFC 7231, 6.5.15
- StatusPreconditionRequired = 428 // RFC 6585, 3
- StatusTooManyRequests = 429 // RFC 6585, 4
- StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
- StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
-
- StatusInternalServerError = 500 // RFC 7231, 6.6.1
- StatusNotImplemented = 501 // RFC 7231, 6.6.2
- StatusBadGateway = 502 // RFC 7231, 6.6.3
- StatusServiceUnavailable = 503 // RFC 7231, 6.6.4
- StatusGatewayTimeout = 504 // RFC 7231, 6.6.5
- StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6
- StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
- StatusInsufficientStorage = 507 // RFC 4918, 11.5
- StatusLoopDetected = 508 // RFC 5842, 7.2
- StatusNotExtended = 510 // RFC 2774, 7
- StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
-)
-
-var (
- statusLines atomic.Value
-
- statusMessages = map[int]string{
- StatusContinue: "Continue",
- StatusSwitchingProtocols: "Switching Protocols",
- StatusProcessing: "Processing",
-
- StatusOK: "OK",
- StatusCreated: "Created",
- StatusAccepted: "Accepted",
- StatusNonAuthoritativeInfo: "Non-Authoritative Information",
- StatusNoContent: "No Content",
- StatusResetContent: "Reset Content",
- StatusPartialContent: "Partial Content",
- StatusMultiStatus: "Multi-Status",
- StatusAlreadyReported: "Already Reported",
- StatusIMUsed: "IM Used",
-
- StatusMultipleChoices: "Multiple Choices",
- StatusMovedPermanently: "Moved Permanently",
- StatusFound: "Found",
- StatusSeeOther: "See Other",
- StatusNotModified: "Not Modified",
- StatusUseProxy: "Use Proxy",
- StatusTemporaryRedirect: "Temporary Redirect",
- StatusPermanentRedirect: "Permanent Redirect",
-
- StatusBadRequest: "Bad Request",
- StatusUnauthorized: "Unauthorized",
- StatusPaymentRequired: "Payment Required",
- StatusForbidden: "Forbidden",
- StatusNotFound: "Not Found",
- StatusMethodNotAllowed: "Method Not Allowed",
- StatusNotAcceptable: "Not Acceptable",
- StatusProxyAuthRequired: "Proxy Authentication Required",
- StatusRequestTimeout: "Request Timeout",
- StatusConflict: "Conflict",
- StatusGone: "Gone",
- StatusLengthRequired: "Length Required",
- StatusPreconditionFailed: "Precondition Failed",
- StatusRequestEntityTooLarge: "Request Entity Too Large",
- StatusRequestURITooLong: "Request URI Too Long",
- StatusUnsupportedMediaType: "Unsupported Media Type",
- StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
- StatusExpectationFailed: "Expectation Failed",
- StatusTeapot: "I'm a teapot",
- StatusUnprocessableEntity: "Unprocessable Entity",
- StatusLocked: "Locked",
- StatusFailedDependency: "Failed Dependency",
- StatusUpgradeRequired: "Upgrade Required",
- StatusPreconditionRequired: "Precondition Required",
- StatusTooManyRequests: "Too Many Requests",
- StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large",
- StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons",
-
- StatusInternalServerError: "Internal Server Error",
- StatusNotImplemented: "Not Implemented",
- StatusBadGateway: "Bad Gateway",
- StatusServiceUnavailable: "Service Unavailable",
- StatusGatewayTimeout: "Gateway Timeout",
- StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
- StatusVariantAlsoNegotiates: "Variant Also Negotiates",
- StatusInsufficientStorage: "Insufficient Storage",
- StatusLoopDetected: "Loop Detected",
- StatusNotExtended: "Not Extended",
- StatusNetworkAuthenticationRequired: "Network Authentication Required",
- }
-)
-
-// StatusMessage returns HTTP status message for the given status code.
-func StatusMessage(statusCode int) string {
- s := statusMessages[statusCode]
- if s == "" {
- s = "Unknown Status Code"
- }
- return s
-}
-
-func init() {
- statusLines.Store(make(map[int][]byte))
-}
-
-func statusLine(statusCode int) []byte {
- m := statusLines.Load().(map[int][]byte)
- h := m[statusCode]
- if h != nil {
- return h
- }
-
- statusText := StatusMessage(statusCode)
-
- h = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, statusText))
- newM := make(map[int][]byte, len(m)+1)
- for k, v := range m {
- newM[k] = v
- }
- newM[statusCode] = h
- statusLines.Store(newM)
- return h
-}
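`statusLine` above is a lock-free, copy-on-write cache: readers `Load` an immutable map, and a cache miss publishes a fresh copy via `Store`, so readers never block. A standalone sketch of the pattern (two racing writers may redo work, but correctness holds because a published map is never mutated in place):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

var lines atomic.Value // holds an immutable map[int][]byte

func init() { lines.Store(make(map[int][]byte)) }

func statusLine(code int) []byte {
	m := lines.Load().(map[int][]byte)
	if h, ok := m[code]; ok {
		return h // fast path: no locks, no allocations
	}
	h := []byte(fmt.Sprintf("HTTP/1.1 %d\r\n", code)) // simplified: no status text
	// Copy-on-write: build a new map so concurrent readers of the
	// old one are never disturbed, then publish it atomically.
	newM := make(map[int][]byte, len(m)+1)
	for k, v := range m {
		newM[k] = v
	}
	newM[code] = h
	lines.Store(newM)
	return h
}

func main() { fmt.Printf("%s", statusLine(200)) }
```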
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/stream.go b/vendor/github.com/VictoriaMetrics/fasthttp/stream.go
deleted file mode 100644
index 0b2ccc881..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/stream.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package fasthttp
-
-import (
- "bufio"
- "io"
- "sync"
-
- "github.com/VictoriaMetrics/fasthttp/fasthttputil"
-)
-
-// StreamWriter must write data to w.
-//
-// Usually StreamWriter writes data to w in a loop (aka 'data streaming').
-//
-// StreamWriter must return immediately if w returns error.
-//
-// Since the written data is buffered, do not forget to call w.Flush
-// when the data must be propagated to the reader.
-type StreamWriter func(w *bufio.Writer)
-
-// NewStreamReader returns a reader, which replays all the data generated by sw.
-//
-// The returned reader may be passed to Response.SetBodyStream.
-//
-// Close must be called on the returned reader after all the required data
-// has been read. Otherwise a goroutine leak may occur.
-//
-// See also Response.SetBodyStreamWriter.
-func NewStreamReader(sw StreamWriter) io.ReadCloser {
- pc := fasthttputil.NewPipeConns()
- pw := pc.Conn1()
- pr := pc.Conn2()
-
- var bw *bufio.Writer
- v := streamWriterBufPool.Get()
- if v == nil {
- bw = bufio.NewWriter(pw)
- } else {
- bw = v.(*bufio.Writer)
- bw.Reset(pw)
- }
-
- go func() {
- sw(bw)
- bw.Flush()
- pw.Close()
-
- streamWriterBufPool.Put(bw)
- }()
-
- return pr
-}
-
-var streamWriterBufPool sync.Pool
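A usage sketch for `NewStreamReader`; note the explicit `Flush` inside the writer and the mandatory `Close` on the reader, both called out in the doc comments above:

```go
package main

import (
	"bufio"
	"fmt"
	"io"

	"github.com/VictoriaMetrics/fasthttp"
)

func main() {
	r := fasthttp.NewStreamReader(func(w *bufio.Writer) {
		for i := 0; i < 3; i++ {
			fmt.Fprintf(w, "chunk %d\n", i)
			w.Flush() // propagate buffered data to the reader side
		}
	})
	defer r.Close() // required; otherwise the writer goroutine may leak

	body, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(body))
}
```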
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/strings.go b/vendor/github.com/VictoriaMetrics/fasthttp/strings.go
deleted file mode 100644
index ebfa3edb2..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/strings.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package fasthttp
-
-var (
- defaultServerName = []byte("fasthttp")
- defaultUserAgent = []byte("fasthttp")
- defaultContentType = []byte("text/plain; charset=utf-8")
-)
-
-var (
- strSlash = []byte("/")
- strSlashSlash = []byte("//")
- strSlashDotDot = []byte("/..")
- strSlashDotSlash = []byte("/./")
- strSlashDotDotSlash = []byte("/../")
- strCRLF = []byte("\r\n")
- strHTTP = []byte("http")
- strHTTPS = []byte("https")
- strHTTP11 = []byte("HTTP/1.1")
- strColonSlashSlash = []byte("://")
- strColonSpace = []byte(": ")
- strGMT = []byte("GMT")
-
- strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n")
-
- strGet = []byte("GET")
- strHead = []byte("HEAD")
- strPost = []byte("POST")
- strPut = []byte("PUT")
- strDelete = []byte("DELETE")
-
- strExpect = []byte("Expect")
- strConnection = []byte("Connection")
- strContentLength = []byte("Content-Length")
- strContentType = []byte("Content-Type")
- strDate = []byte("Date")
- strHost = []byte("Host")
- strReferer = []byte("Referer")
- strServer = []byte("Server")
- strTransferEncoding = []byte("Transfer-Encoding")
- strContentEncoding = []byte("Content-Encoding")
- strAcceptEncoding = []byte("Accept-Encoding")
- strUserAgent = []byte("User-Agent")
- strCookie = []byte("Cookie")
- strSetCookie = []byte("Set-Cookie")
- strLocation = []byte("Location")
- strIfModifiedSince = []byte("If-Modified-Since")
- strLastModified = []byte("Last-Modified")
- strAcceptRanges = []byte("Accept-Ranges")
- strRange = []byte("Range")
- strContentRange = []byte("Content-Range")
-
- strCookieExpires = []byte("expires")
- strCookieDomain = []byte("domain")
- strCookiePath = []byte("path")
- strCookieHTTPOnly = []byte("HttpOnly")
- strCookieSecure = []byte("secure")
-
- strClose = []byte("close")
- strGzip = []byte("gzip")
- strDeflate = []byte("deflate")
- strKeepAlive = []byte("keep-alive")
- strKeepAliveCamelCase = []byte("Keep-Alive")
- strUpgrade = []byte("Upgrade")
- strChunked = []byte("chunked")
- strIdentity = []byte("identity")
- str100Continue = []byte("100-continue")
- strPostArgsContentType = []byte("application/x-www-form-urlencoded")
- strMultipartFormData = []byte("multipart/form-data")
- strBoundary = []byte("boundary")
- strBytes = []byte("bytes")
- strTextSlash = []byte("text/")
- strApplicationSlash = []byte("application/")
-)
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go b/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
deleted file mode 100644
index 0e03482bf..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package fasthttp
-
-import (
- "errors"
- "net"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Dial dials the given TCP addr using tcp4.
-//
-// This function has the following additional features compared to net.Dial:
-//
-// - It reduces load on the DNS resolver by caching resolved TCP addresses
-// for DefaultDNSCacheDuration.
-// - It dials all the resolved TCP addresses in round-robin manner until
-// connection is established. This may be useful if certain addresses
-// are temporarily unreachable.
-// - It returns ErrDialTimeout if connection cannot be established during
-// DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout.
-//
-// This dialer is intended for custom code wrapping before passing
-// to Client.Dial or HostClient.Dial.
-//
-// For instance, per-host counters and/or limits may be implemented
-// by such wrappers.
-//
-// The addr passed to the function must contain port. Example addr values:
-//
-// - foobar.baz:443
-// - foo.bar:80
-// - aaa.com:8080
-func Dial(addr string) (net.Conn, error) {
- return getDialer(DefaultDialTimeout, false)(addr)
-}
-
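As the comment above suggests, per-host counters or limits are implemented by wrapping `Dial` before handing it to a client. A hedged sketch (the counter, address, and the `HostClient.Get` call mirror upstream `fasthttp` and are assumed to match the fork):

```go
package main

import (
	"fmt"
	"net"
	"sync/atomic"

	"github.com/VictoriaMetrics/fasthttp"
)

var dials int64

// countingDial wraps fasthttp.Dial with a global dial counter.
func countingDial(addr string) (net.Conn, error) {
	atomic.AddInt64(&dials, 1)
	return fasthttp.Dial(addr)
}

func main() {
	hc := &fasthttp.HostClient{
		Addr: "example.com:80",
		Dial: countingDial,
	}
	statusCode, body, err := hc.Get(nil, "http://example.com/")
	if err != nil {
		panic(err)
	}
	fmt.Println(statusCode, len(body), atomic.LoadInt64(&dials))
}
```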
-// DialTimeout dials the given TCP addr using tcp4 using the given timeout.
-//
-// This function has the following additional features compared to net.Dial:
-//
-// - It reduces load on the DNS resolver by caching resolved TCP addresses
-// for DefaultDNSCacheDuration.
-// - It dials all the resolved TCP addresses in round-robin manner until
-// connection is established. This may be useful if certain addresses
-// are temporarily unreachable.
-//
-// This dialer is intended for custom code wrapping before passing
-// to Client.Dial or HostClient.Dial.
-//
-// For instance, per-host counters and/or limits may be implemented
-// by such wrappers.
-//
-// The addr passed to the function must contain port. Example addr values:
-//
-// - foobar.baz:443
-// - foo.bar:80
-// - aaa.com:8080
-func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) {
- return getDialer(timeout, false)(addr)
-}
-
-// DialDualStack dials the given TCP addr using both tcp4 and tcp6.
-//
-// This function has the following additional features compared to net.Dial:
-//
-// - It reduces load on the DNS resolver by caching resolved TCP addresses
-// for DefaultDNSCacheDuration.
-// - It dials all the resolved TCP addresses in round-robin manner until
-// connection is established. This may be useful if certain addresses
-// are temporarily unreachable.
-// - It returns ErrDialTimeout if connection cannot be established during
-// DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial
-// timeout.
-//
-// This dialer is intended for custom code wrapping before passing
-// to Client.Dial or HostClient.Dial.
-//
-// For instance, per-host counters and/or limits may be implemented
-// by such wrappers.
-//
-// The addr passed to the function must contain port. Example addr values:
-//
-// - foobar.baz:443
-// - foo.bar:80
-// - aaa.com:8080
-func DialDualStack(addr string) (net.Conn, error) {
- return getDialer(DefaultDialTimeout, true)(addr)
-}
-
-// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6
-// using the given timeout.
-//
-// This function has the following additional features compared to net.Dial:
-//
-// - It reduces load on the DNS resolver by caching resolved TCP addresses
-// for DefaultDNSCacheDuration.
-// - It dials all the resolved TCP addresses in round-robin manner until
-// connection is established. This may be useful if certain addresses
-// are temporarily unreachable.
-//
-// This dialer is intended for custom code wrapping before passing
-// to Client.Dial or HostClient.Dial.
-//
-// For instance, per-host counters and/or limits may be implemented
-// by such wrappers.
-//
-// The addr passed to the function must contain port. Example addr values:
-//
-// - foobar.baz:443
-// - foo.bar:80
-// - aaa.com:8080
-func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) {
- return getDialer(timeout, true)(addr)
-}
-
-func getDialer(timeout time.Duration, dualStack bool) DialFunc {
- if timeout <= 0 {
- timeout = DefaultDialTimeout
- }
- timeoutRounded := int(timeout.Seconds()*10 + 9)
-
- m := dialMap
- if dualStack {
- m = dialDualStackMap
- }
-
- dialMapLock.Lock()
- d := m[timeoutRounded]
- if d == nil {
- dialer := dialerStd
- if dualStack {
- dialer = dialerDualStack
- }
- d = dialer.NewDial(timeout)
- m[timeoutRounded] = d
- }
- dialMapLock.Unlock()
- return d
-}
-
-var (
- dialerStd = &tcpDialer{}
- dialerDualStack = &tcpDialer{DualStack: true}
-
- dialMap = make(map[int]DialFunc)
- dialDualStackMap = make(map[int]DialFunc)
- dialMapLock sync.Mutex
-)
-
-type tcpDialer struct {
- DualStack bool
-
- tcpAddrsLock sync.Mutex
- tcpAddrsMap map[string]*tcpAddrEntry
-
- concurrencyCh chan struct{}
-
- once sync.Once
-}
-
-const maxDialConcurrency = 1000
-
-func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc {
- d.once.Do(func() {
- d.concurrencyCh = make(chan struct{}, maxDialConcurrency)
- d.tcpAddrsMap = make(map[string]*tcpAddrEntry)
- go d.tcpAddrsClean()
- })
-
- return func(addr string) (net.Conn, error) {
- addrs, idx, err := d.getTCPAddrs(addr)
- if err != nil {
- return nil, err
- }
- network := "tcp4"
- if d.DualStack {
- network = "tcp"
- }
-
- var conn net.Conn
- n := uint32(len(addrs))
- deadline := time.Now().Add(timeout)
- for n > 0 {
- conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh)
- if err == nil {
- return conn, nil
- }
- if err == ErrDialTimeout {
- return nil, err
- }
- idx++
- n--
- }
- return nil, err
- }
-}
-
-func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) {
- timeout := -time.Since(deadline)
- if timeout <= 0 {
- return nil, ErrDialTimeout
- }
-
- select {
- case concurrencyCh <- struct{}{}:
- default:
- tc := acquireTimer(timeout)
- isTimeout := false
- select {
- case concurrencyCh <- struct{}{}:
- case <-tc.C:
- isTimeout = true
- }
- releaseTimer(tc)
- if isTimeout {
- return nil, ErrDialTimeout
- }
- }
-
- timeout = -time.Since(deadline)
- if timeout <= 0 {
- <-concurrencyCh
- return nil, ErrDialTimeout
- }
-
- chv := dialResultChanPool.Get()
- if chv == nil {
- chv = make(chan dialResult, 1)
- }
- ch := chv.(chan dialResult)
- go func() {
- var dr dialResult
- dr.conn, dr.err = net.DialTCP(network, nil, addr)
- ch <- dr
- <-concurrencyCh
- }()
-
- var (
- conn net.Conn
- err error
- )
-
- tc := acquireTimer(timeout)
- select {
- case dr := <-ch:
- conn = dr.conn
- err = dr.err
- dialResultChanPool.Put(ch)
- case <-tc.C:
- err = ErrDialTimeout
- }
- releaseTimer(tc)
-
- return conn, err
-}
-
-var dialResultChanPool sync.Pool
-
-type dialResult struct {
- conn net.Conn
- err error
-}
-
-// ErrDialTimeout is returned when TCP dialing times out.
-var ErrDialTimeout = errors.New("dialing to the given TCP address timed out")
-
-// DefaultDialTimeout is the timeout used by Dial and DialDualStack
-// for establishing TCP connections.
-const DefaultDialTimeout = 3 * time.Second
-
-type tcpAddrEntry struct {
- addrs []net.TCPAddr
- addrsIdx uint32
-
- resolveTime time.Time
- pending bool
-}
-
-// DefaultDNSCacheDuration is the duration for caching resolved TCP addresses
-// by Dial* functions.
-const DefaultDNSCacheDuration = time.Minute
-
-func (d *tcpDialer) tcpAddrsClean() {
- expireDuration := 2 * DefaultDNSCacheDuration
- for {
- time.Sleep(time.Second)
- t := time.Now()
-
- d.tcpAddrsLock.Lock()
- for k, e := range d.tcpAddrsMap {
- if t.Sub(e.resolveTime) > expireDuration {
- delete(d.tcpAddrsMap, k)
- }
- }
- d.tcpAddrsLock.Unlock()
- }
-}
-
-func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) {
- d.tcpAddrsLock.Lock()
- e := d.tcpAddrsMap[addr]
- if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration {
- e.pending = true
- e = nil
- }
- d.tcpAddrsLock.Unlock()
-
- if e == nil {
- addrs, err := resolveTCPAddrs(addr, d.DualStack)
- if err != nil {
- d.tcpAddrsLock.Lock()
- e = d.tcpAddrsMap[addr]
- if e != nil && e.pending {
- e.pending = false
- }
- d.tcpAddrsLock.Unlock()
- return nil, 0, err
- }
-
- e = &tcpAddrEntry{
- addrs: addrs,
- resolveTime: time.Now(),
- }
-
- d.tcpAddrsLock.Lock()
- d.tcpAddrsMap[addr] = e
- d.tcpAddrsLock.Unlock()
- }
-
- idx := atomic.AddUint32(&e.addrsIdx, 1)
- return e.addrs, idx, nil
-}
-
-func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) {
- host, portS, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- port, err := strconv.Atoi(portS)
- if err != nil {
- return nil, err
- }
-
- ips, err := net.LookupIP(host)
- if err != nil {
- return nil, err
- }
-
- n := len(ips)
- addrs := make([]net.TCPAddr, 0, n)
- for i := 0; i < n; i++ {
- ip := ips[i]
- if !dualStack && ip.To4() == nil {
- continue
- }
- addrs = append(addrs, net.TCPAddr{
- IP: ip,
- Port: port,
- })
- }
- if len(addrs) == 0 {
- return nil, errNoDNSEntries
- }
- return addrs, nil
-}
-
-var errNoDNSEntries = errors.New("couldn't find DNS entries for the given domain. Try using DialDualStack")
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/timer.go b/vendor/github.com/VictoriaMetrics/fasthttp/timer.go
deleted file mode 100644
index bb12acb7e..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/timer.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package fasthttp
-
-import (
- "sync"
- "time"
-)
-
-func initTimer(t *time.Timer, timeout time.Duration) *time.Timer {
- if t == nil {
- return time.NewTimer(timeout)
- }
- if t.Reset(timeout) {
- panic("BUG: active timer trapped into initTimer()")
- }
- return t
-}
-
-func stopTimer(t *time.Timer) {
- if !t.Stop() {
- // Drain the value from the channel if the timer has already
- // fired and nobody collected its value.
- select {
- case <-t.C:
- default:
- }
- }
-}
-
-func acquireTimer(timeout time.Duration) *time.Timer {
- v := timerPool.Get()
- if v == nil {
- return time.NewTimer(timeout)
- }
- t := v.(*time.Timer)
- initTimer(t, timeout)
- return t
-}
-
-func releaseTimer(t *time.Timer) {
- stopTimer(t)
- timerPool.Put(t)
-}
-
-var timerPool sync.Pool
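These helpers implement the pooled-timer pattern used throughout the package (see `tryDial` above): acquire a timer for a one-shot timeout in a `select`, then stop, drain, and pool it. A hedged standalone sketch of the same idea:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var pool sync.Pool

func acquire(d time.Duration) *time.Timer {
	if v := pool.Get(); v != nil {
		t := v.(*time.Timer)
		t.Reset(d)
		return t
	}
	return time.NewTimer(d)
}

func release(t *time.Timer) {
	if !t.Stop() {
		// Drain a value delivered after the timer already fired,
		// so a pooled timer never starts with a stale tick.
		select {
		case <-t.C:
		default:
		}
	}
	pool.Put(t)
}

func main() {
	done := make(chan struct{})
	t := acquire(50 * time.Millisecond)
	select {
	case <-done:
		fmt.Println("work finished")
	case <-t.C:
		fmt.Println("timed out")
	}
	release(t)
}
```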
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/uri.go b/vendor/github.com/VictoriaMetrics/fasthttp/uri.go
deleted file mode 100644
index 504eb663b..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/uri.go
+++ /dev/null
@@ -1,525 +0,0 @@
-package fasthttp
-
-import (
- "bytes"
- "io"
- "sync"
-)
-
-// AcquireURI returns an empty URI instance from the pool.
-//
-// Release the URI with ReleaseURI after the URI is no longer needed.
-// This allows reducing GC load.
-func AcquireURI() *URI {
- return uriPool.Get().(*URI)
-}
-
-// ReleaseURI releases the URI acquired via AcquireURI.
-//
-// The released URI mustn't be used after releasing it, otherwise data races
-// may occur.
-func ReleaseURI(u *URI) {
- u.Reset()
- uriPool.Put(u)
-}
-
-var uriPool = &sync.Pool{
- New: func() interface{} {
- return &URI{}
- },
-}
-
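Typical pooled usage, relying on the parsing and accessor methods defined later in this file:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fasthttp"
)

func main() {
	u := fasthttp.AcquireURI()
	defer fasthttp.ReleaseURI(u) // return to the pool; u must not be used afterwards

	u.Parse(nil, []byte("http://aaa.com/foo/bar?baz=123#qwe"))
	fmt.Printf("scheme=%s host=%s path=%s query=%s hash=%s\n",
		u.Scheme(), u.Host(), u.Path(), u.QueryString(), u.Hash())
}
```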
-// URI represents URI :) .
-//
-// Copying URI instances is forbidden. Create a new instance and use CopyTo
-// instead.
-//
-// URI instance MUST NOT be used from concurrently running goroutines.
-type URI struct {
- noCopy noCopy
-
- pathOriginal []byte
- scheme []byte
- path []byte
- queryString []byte
- hash []byte
- host []byte
-
- queryArgs Args
- parsedQueryArgs bool
-
- fullURI []byte
- requestURI []byte
-
- h *RequestHeader
-}
-
-// CopyTo copies uri contents to dst.
-func (u *URI) CopyTo(dst *URI) {
- dst.Reset()
- dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...)
- dst.scheme = append(dst.scheme[:0], u.scheme...)
- dst.path = append(dst.path[:0], u.path...)
- dst.queryString = append(dst.queryString[:0], u.queryString...)
- dst.hash = append(dst.hash[:0], u.hash...)
- dst.host = append(dst.host[:0], u.host...)
-
- u.queryArgs.CopyTo(&dst.queryArgs)
- dst.parsedQueryArgs = u.parsedQueryArgs
-
- // fullURI and requestURI shouldn't be copied, since they are created
- // from scratch on each FullURI() and RequestURI() call.
- dst.h = u.h
-}
-
-// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe .
-//
-// The returned value is valid until the next URI method call.
-func (u *URI) Hash() []byte {
- return u.hash
-}
-
-// SetHash sets URI hash.
-func (u *URI) SetHash(hash string) {
- u.hash = append(u.hash[:0], hash...)
-}
-
-// SetHashBytes sets URI hash.
-func (u *URI) SetHashBytes(hash []byte) {
- u.hash = append(u.hash[:0], hash...)
-}
-
-// QueryString returns URI query string,
-// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe .
-//
-// The returned value is valid until the next URI method call.
-func (u *URI) QueryString() []byte {
- return u.queryString
-}
-
-// SetQueryString sets URI query string.
-func (u *URI) SetQueryString(queryString string) {
- u.queryString = append(u.queryString[:0], queryString...)
- u.parsedQueryArgs = false
-}
-
-// SetQueryStringBytes sets URI query string.
-func (u *URI) SetQueryStringBytes(queryString []byte) {
- u.queryString = append(u.queryString[:0], queryString...)
- u.parsedQueryArgs = false
-}
-
-// Path returns URI path, i.e. /foo/bar of http://aaa.com/foo/bar?baz=123#qwe .
-//
-// The returned path is always urldecoded and normalized,
-// i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'.
-//
-// The returned value is valid until the next URI method call.
-func (u *URI) Path() []byte {
- path := u.path
- if len(path) == 0 {
- path = strSlash
- }
- return path
-}
-
-// SetPath sets URI path.
-func (u *URI) SetPath(path string) {
- u.pathOriginal = append(u.pathOriginal[:0], path...)
- u.path = normalizePath(u.path, u.pathOriginal)
-}
-
-// SetPathBytes sets URI path.
-func (u *URI) SetPathBytes(path []byte) {
- u.pathOriginal = append(u.pathOriginal[:0], path...)
- u.path = normalizePath(u.path, u.pathOriginal)
-}
-
-// PathOriginal returns the original path from requestURI passed to URI.Parse().
-//
-// The returned value is valid until the next URI method call.
-func (u *URI) PathOriginal() []byte {
- return u.pathOriginal
-}
-
-// Scheme returns URI scheme, i.e. http of http://aaa.com/foo/bar?baz=123#qwe .
-//
-// Returned scheme is always lowercased.
-//
-// The returned value is valid until the next URI method call.
-func (u *URI) Scheme() []byte {
- scheme := u.scheme
- if len(scheme) == 0 {
- scheme = strHTTP
- }
- return scheme
-}
-
-// SetScheme sets URI scheme, i.e. http, https, ftp, etc.
-func (u *URI) SetScheme(scheme string) {
- u.scheme = append(u.scheme[:0], scheme...)
- lowercaseBytes(u.scheme)
-}
-
-// SetSchemeBytes sets URI scheme, i.e. http, https, ftp, etc.
-func (u *URI) SetSchemeBytes(scheme []byte) {
- u.scheme = append(u.scheme[:0], scheme...)
- lowercaseBytes(u.scheme)
-}
-
-// Reset clears uri.
-func (u *URI) Reset() {
- u.pathOriginal = u.pathOriginal[:0]
- u.scheme = u.scheme[:0]
- u.path = u.path[:0]
- u.queryString = u.queryString[:0]
- u.hash = u.hash[:0]
-
- u.host = u.host[:0]
- u.queryArgs.Reset()
- u.parsedQueryArgs = false
-
- // There is no need for u.fullURI = u.fullURI[:0], since the full uri
- // is calculated on each call to FullURI().
-
- // There is no need for u.requestURI = u.requestURI[:0], since requestURI
- // is calculated on each call to RequestURI().
-
- u.h = nil
-}
-
-// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe .
-//
-// Host is always lowercased.
-func (u *URI) Host() []byte {
- if len(u.host) == 0 && u.h != nil {
- u.host = append(u.host[:0], u.h.Host()...)
- lowercaseBytes(u.host)
- u.h = nil
- }
- return u.host
-}
-
-// SetHost sets host for the uri.
-func (u *URI) SetHost(host string) {
- u.host = append(u.host[:0], host...)
- lowercaseBytes(u.host)
-}
-
-// SetHostBytes sets host for the uri.
-func (u *URI) SetHostBytes(host []byte) {
- u.host = append(u.host[:0], host...)
- lowercaseBytes(u.host)
-}
-
-// Parse initializes URI from the given host and uri.
-//
-// host may be nil. In this case uri must contain a fully qualified uri,
-// i.e. with scheme and host. http is assumed if scheme is omitted.
-//
-// uri may contain e.g. RequestURI without scheme and host if host is non-empty.
-func (u *URI) Parse(host, uri []byte) {
- u.parse(host, uri, nil)
-}
-
-func (u *URI) parseQuick(uri []byte, h *RequestHeader, isTLS bool) {
- u.parse(nil, uri, h)
- if isTLS {
- u.scheme = append(u.scheme[:0], strHTTPS...)
- }
-}
-
-func (u *URI) parse(host, uri []byte, h *RequestHeader) {
- u.Reset()
- u.h = h
-
- scheme, host, uri := splitHostURI(host, uri)
- u.scheme = append(u.scheme, scheme...)
- lowercaseBytes(u.scheme)
- u.host = append(u.host, host...)
- lowercaseBytes(u.host)
-
- b := uri
- queryIndex := bytes.IndexByte(b, '?')
- fragmentIndex := bytes.IndexByte(b, '#')
- // Ignore query in fragment part
- if fragmentIndex >= 0 && queryIndex > fragmentIndex {
- queryIndex = -1
- }
-
- if queryIndex < 0 && fragmentIndex < 0 {
- u.pathOriginal = append(u.pathOriginal, b...)
- u.path = normalizePath(u.path, u.pathOriginal)
- return
- }
-
- if queryIndex >= 0 {
- // Path is everything up to the start of the query
- u.pathOriginal = append(u.pathOriginal, b[:queryIndex]...)
- u.path = normalizePath(u.path, u.pathOriginal)
-
- if fragmentIndex < 0 {
- u.queryString = append(u.queryString, b[queryIndex+1:]...)
- } else {
- u.queryString = append(u.queryString, b[queryIndex+1:fragmentIndex]...)
- u.hash = append(u.hash, b[fragmentIndex+1:]...)
- }
- return
- }
-
- // fragmentIndex >= 0 && queryIndex < 0
- // Path is up to the start of fragment
- u.pathOriginal = append(u.pathOriginal, b[:fragmentIndex]...)
- u.path = normalizePath(u.path, u.pathOriginal)
- u.hash = append(u.hash, b[fragmentIndex+1:]...)
-}
-
-func normalizePath(dst, src []byte) []byte {
- dst = dst[:0]
- dst = addLeadingSlash(dst, src)
- dst = decodeArgAppendNoPlus(dst, src)
-
- // remove duplicate slashes
- b := dst
- bSize := len(b)
- for {
- n := bytes.Index(b, strSlashSlash)
- if n < 0 {
- break
- }
- b = b[n:]
- copy(b, b[1:])
- b = b[:len(b)-1]
- bSize--
- }
- dst = dst[:bSize]
-
- // remove /./ parts
- b = dst
- for {
- n := bytes.Index(b, strSlashDotSlash)
- if n < 0 {
- break
- }
- nn := n + len(strSlashDotSlash) - 1
- copy(b[n:], b[nn:])
- b = b[:len(b)-nn+n]
- }
-
- // remove /foo/../ parts
- for {
- n := bytes.Index(b, strSlashDotDotSlash)
- if n < 0 {
- break
- }
- nn := bytes.LastIndexByte(b[:n], '/')
- if nn < 0 {
- nn = 0
- }
- n += len(strSlashDotDotSlash) - 1
- copy(b[nn:], b[n:])
- b = b[:len(b)-n+nn]
- }
-
- // remove trailing /foo/..
- n := bytes.LastIndex(b, strSlashDotDot)
- if n >= 0 && n+len(strSlashDotDot) == len(b) {
- nn := bytes.LastIndexByte(b[:n], '/')
- if nn < 0 {
- return strSlash
- }
- b = b[:nn+1]
- }
-
- return b
-}
-
-// RequestURI returns RequestURI - i.e. URI without Scheme and Host.
-func (u *URI) RequestURI() []byte {
- dst := appendQuotedPath(u.requestURI[:0], u.Path())
- if u.queryArgs.Len() > 0 {
- dst = append(dst, '?')
- dst = u.queryArgs.AppendBytes(dst)
- } else if len(u.queryString) > 0 {
- dst = append(dst, '?')
- dst = append(dst, u.queryString...)
- }
- if len(u.hash) > 0 {
- dst = append(dst, '#')
- dst = append(dst, u.hash...)
- }
- u.requestURI = dst
- return u.requestURI
-}
-
-// LastPathSegment returns the last part of uri path after '/'.
-//
-// Examples:
-//
- // - For /foo/bar/baz.html returns baz.html.
- // - For /foo/bar/ returns an empty byte slice.
-// - For /foobar.js returns foobar.js.
-func (u *URI) LastPathSegment() []byte {
- path := u.Path()
- n := bytes.LastIndexByte(path, '/')
- if n < 0 {
- return path
- }
- return path[n+1:]
-}
-
-// Update updates uri.
-//
-// The following newURI types are accepted:
-//
-// - Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original
-// uri is replaced by newURI.
-// - Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case
-// the original scheme is preserved.
-// - Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part
-// of the original uri is replaced.
-// - Relative path, i.e. xx?yy=abc . In this case the original RequestURI
-// is updated according to the new relative path.
-func (u *URI) Update(newURI string) {
- u.UpdateBytes(s2b(newURI))
-}
-
-// UpdateBytes updates uri.
-//
-// The following newURI types are accepted:
-//
-// - Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original
-// uri is replaced by newURI.
-// - Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case
-// the original scheme is preserved.
-// - Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part
-// of the original uri is replaced.
-// - Relative path, i.e. xx?yy=abc . In this case the original RequestURI
-// is updated according to the new relative path.
-func (u *URI) UpdateBytes(newURI []byte) {
- u.requestURI = u.updateBytes(newURI, u.requestURI)
-}
-
-func (u *URI) updateBytes(newURI, buf []byte) []byte {
- if len(newURI) == 0 {
- return buf
- }
-
- n := bytes.Index(newURI, strSlashSlash)
- if n >= 0 {
- // absolute uri
- var b [32]byte
- schemeOriginal := b[:0]
- if len(u.scheme) > 0 {
- schemeOriginal = append([]byte(nil), u.scheme...)
- }
- u.Parse(nil, newURI)
- if len(schemeOriginal) > 0 && len(u.scheme) == 0 {
- u.scheme = append(u.scheme[:0], schemeOriginal...)
- }
- return buf
- }
-
- if newURI[0] == '/' {
- // uri without host
- buf = u.appendSchemeHost(buf[:0])
- buf = append(buf, newURI...)
- u.Parse(nil, buf)
- return buf
- }
-
- // relative path
- switch newURI[0] {
- case '?':
- // query string only update
- u.SetQueryStringBytes(newURI[1:])
- return append(buf[:0], u.FullURI()...)
- case '#':
- // update only hash
- u.SetHashBytes(newURI[1:])
- return append(buf[:0], u.FullURI()...)
- default:
- // update the last path part after the slash
- path := u.Path()
- n = bytes.LastIndexByte(path, '/')
- if n < 0 {
- panic("BUG: path must contain at least one slash")
- }
- buf = u.appendSchemeHost(buf[:0])
- buf = appendQuotedPath(buf, path[:n+1])
- buf = append(buf, newURI...)
- u.Parse(nil, buf)
- return buf
- }
-}
-
-// FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}.
-func (u *URI) FullURI() []byte {
- u.fullURI = u.AppendBytes(u.fullURI[:0])
- return u.fullURI
-}
-
-// AppendBytes appends full uri to dst and returns the extended dst.
-func (u *URI) AppendBytes(dst []byte) []byte {
- dst = u.appendSchemeHost(dst)
- return append(dst, u.RequestURI()...)
-}
-
-func (u *URI) appendSchemeHost(dst []byte) []byte {
- dst = append(dst, u.Scheme()...)
- dst = append(dst, strColonSlashSlash...)
- return append(dst, u.Host()...)
-}
-
-// WriteTo writes full uri to w.
-//
-// WriteTo implements io.WriterTo interface.
-func (u *URI) WriteTo(w io.Writer) (int64, error) {
- n, err := w.Write(u.FullURI())
- return int64(n), err
-}
-
-// String returns full uri.
-func (u *URI) String() string {
- return string(u.FullURI())
-}
-
-func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) {
- n := bytes.Index(uri, strSlashSlash)
- if n < 0 {
- return strHTTP, host, uri
- }
- scheme := uri[:n]
- if bytes.IndexByte(scheme, '/') >= 0 {
- return strHTTP, host, uri
- }
- if len(scheme) > 0 && scheme[len(scheme)-1] == ':' {
- scheme = scheme[:len(scheme)-1]
- }
- n += len(strSlashSlash)
- uri = uri[n:]
- n = bytes.IndexByte(uri, '/')
- if n < 0 {
- // A hack for bogus URLs like foobar.com?a=b without
- // a slash after the host.
- if n = bytes.IndexByte(uri, '?'); n >= 0 {
- return scheme, uri[:n], uri[n:]
- }
- return scheme, uri, strSlash
- }
- return scheme, uri[:n], uri[n:]
-}
-
-// QueryArgs returns query args.
-func (u *URI) QueryArgs() *Args {
- u.parseQueryArgs()
- return &u.queryArgs
-}
-
-func (u *URI) parseQueryArgs() {
- if u.parsedQueryArgs {
- return
- }
- u.queryArgs.ParseBytes(u.queryString)
- u.parsedQueryArgs = true
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/uri_unix.go b/vendor/github.com/VictoriaMetrics/fasthttp/uri_unix.go
deleted file mode 100644
index c2ac8fa46..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/uri_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package fasthttp
-
-func addLeadingSlash(dst, src []byte) []byte {
- // add a leading slash for unix paths if it is missing
- if len(src) == 0 || src[0] != '/' {
- dst = append(dst, '/')
- }
-
- return dst
-}
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/uri_windows.go b/vendor/github.com/VictoriaMetrics/fasthttp/uri_windows.go
deleted file mode 100644
index e1391a7ac..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/uri_windows.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build windows
-// +build windows
-
-package fasthttp
-
-func addLeadingSlash(dst, src []byte) []byte {
- // zero length and "C:/" case
- if len(src) == 0 || (len(src) > 2 && src[1] != ':') {
- dst = append(dst, '/')
- }
-
- return dst
-}
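The unix and windows variants above differ only in the drive-letter check. A standalone illustration follows; the *Unix/*Windows names are hypothetical, since the real function is always called addLeadingSlash and the variant is selected by build tags.

package main

import "fmt"

// Re-statement of the !windows variant above.
func addLeadingSlashUnix(dst, src []byte) []byte {
	if len(src) == 0 || src[0] != '/' {
		dst = append(dst, '/')
	}
	return dst
}

// Re-statement of the windows variant above: drive paths keep no leading slash.
func addLeadingSlashWindows(dst, src []byte) []byte {
	if len(src) == 0 || (len(src) > 2 && src[1] != ':') {
		dst = append(dst, '/')
	}
	return dst
}

func main() {
	for _, p := range []string{"", "foo/bar", "C:/temp"} {
		fmt.Printf("%-10q unix=%q windows=%q\n", p,
			addLeadingSlashUnix(nil, []byte(p)),
			addLeadingSlashWindows(nil, []byte(p)))
	}
}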
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/userdata.go b/vendor/github.com/VictoriaMetrics/fasthttp/userdata.go
deleted file mode 100644
index bd3e28aa1..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/userdata.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package fasthttp
-
-import (
- "io"
-)
-
-type userDataKV struct {
- key []byte
- value interface{}
-}
-
-type userData []userDataKV
-
-func (d *userData) Set(key string, value interface{}) {
- args := *d
- n := len(args)
- for i := 0; i < n; i++ {
- kv := &args[i]
- if string(kv.key) == key {
- kv.value = value
- return
- }
- }
-
- c := cap(args)
- if c > n {
- args = args[:n+1]
- kv := &args[n]
- kv.key = append(kv.key[:0], key...)
- kv.value = value
- *d = args
- return
- }
-
- kv := userDataKV{}
- kv.key = append(kv.key[:0], key...)
- kv.value = value
- *d = append(args, kv)
-}
-
-func (d *userData) SetBytes(key []byte, value interface{}) {
- d.Set(b2s(key), value)
-}
-
-func (d *userData) Get(key string) interface{} {
- args := *d
- n := len(args)
- for i := 0; i < n; i++ {
- kv := &args[i]
- if string(kv.key) == key {
- return kv.value
- }
- }
- return nil
-}
-
-func (d *userData) GetBytes(key []byte) interface{} {
- return d.Get(b2s(key))
-}
-
-func (d *userData) Reset() {
- args := *d
- n := len(args)
- for i := 0; i < n; i++ {
- v := args[i].value
- if vc, ok := v.(io.Closer); ok {
- vc.Close()
- }
- }
- *d = (*d)[:0]
-}
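A self-contained sketch of the userData pattern deleted above: a linear-scan key/value slice that copies keys, and closes io.Closer values on Reset. All names here are illustrative, not part of the removed package.

package main

import (
	"fmt"
	"io"
)

type kv struct {
	key   []byte
	value interface{}
}

type store []kv

func (s *store) Set(key string, value interface{}) {
	for i := range *s {
		// string(b) == s comparisons do not allocate in Go.
		if string((*s)[i].key) == key {
			(*s)[i].value = value
			return
		}
	}
	*s = append(*s, kv{key: append([]byte(nil), key...), value: value})
}

func (s *store) Get(key string) interface{} {
	for i := range *s {
		if string((*s)[i].key) == key {
			return (*s)[i].value
		}
	}
	return nil
}

func (s *store) Reset() {
	// Close any value that owns resources, as the original Reset does.
	for i := range *s {
		if c, ok := (*s)[i].value.(io.Closer); ok {
			c.Close()
		}
	}
	*s = (*s)[:0]
}

func main() {
	var s store
	s.Set("requestID", 42)
	fmt.Println(s.Get("requestID")) // 42
	s.Reset()
	fmt.Println(s.Get("requestID")) // <nil>
}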
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/workerpool.go b/vendor/github.com/VictoriaMetrics/fasthttp/workerpool.go
deleted file mode 100644
index 081ac65c3..000000000
--- a/vendor/github.com/VictoriaMetrics/fasthttp/workerpool.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package fasthttp
-
-import (
- "net"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-// workerPool serves incoming connections via a pool of workers
-// in FILO order, i.e. the most recently stopped worker will serve the next
-// incoming connection.
-//
-// Such a scheme keeps CPU caches hot (in theory).
-type workerPool struct {
- // Function for serving server connections.
- // It must leave c unclosed.
- WorkerFunc func(c net.Conn) error
-
- MaxWorkersCount int
-
- LogAllErrors bool
-
- MaxIdleWorkerDuration time.Duration
-
- Logger Logger
-
- lock sync.Mutex
- workersCount int
- mustStop bool
-
- ready []*workerChan
-
- stopCh chan struct{}
-
- workerChanPool sync.Pool
-}
-
-type workerChan struct {
- lastUseTime time.Time
- ch chan net.Conn
-}
-
-func (wp *workerPool) Start() {
- if wp.stopCh != nil {
- panic("BUG: workerPool already started")
- }
- wp.stopCh = make(chan struct{})
- stopCh := wp.stopCh
- go func() {
- var scratch []*workerChan
- for {
- wp.clean(&scratch)
- select {
- case <-stopCh:
- return
- default:
- time.Sleep(wp.getMaxIdleWorkerDuration())
- }
- }
- }()
-}
-
-func (wp *workerPool) Stop() {
- if wp.stopCh == nil {
- panic("BUG: workerPool wasn't started")
- }
- close(wp.stopCh)
- wp.stopCh = nil
-
- // Stop all the workers waiting for incoming connections.
- // Do not wait for busy workers - they will stop after
- // serving the connection and noticing wp.mustStop = true.
- wp.lock.Lock()
- ready := wp.ready
- for i, ch := range ready {
- ch.ch <- nil
- ready[i] = nil
- }
- wp.ready = ready[:0]
- wp.mustStop = true
- wp.lock.Unlock()
-}
-
-func (wp *workerPool) getMaxIdleWorkerDuration() time.Duration {
- if wp.MaxIdleWorkerDuration <= 0 {
- return 10 * time.Second
- }
- return wp.MaxIdleWorkerDuration
-}
-
-func (wp *workerPool) clean(scratch *[]*workerChan) {
- maxIdleWorkerDuration := wp.getMaxIdleWorkerDuration()
-
- // Clean the least recently used workers if they haven't served a connection
- // for more than maxIdleWorkerDuration.
- currentTime := time.Now()
-
- wp.lock.Lock()
- ready := wp.ready
- n := len(ready)
- i := 0
- for i < n && currentTime.Sub(ready[i].lastUseTime) > maxIdleWorkerDuration {
- i++
- }
- *scratch = append((*scratch)[:0], ready[:i]...)
- if i > 0 {
- m := copy(ready, ready[i:])
- for i = m; i < n; i++ {
- ready[i] = nil
- }
- wp.ready = ready[:m]
- }
- wp.lock.Unlock()
-
- // Notify obsolete workers to stop.
- // This notification must be outside the wp.lock, since ch.ch
- // may be blocking and may consume a lot of time if many workers
- // are located on non-local CPUs.
- tmp := *scratch
- for i, ch := range tmp {
- ch.ch <- nil
- tmp[i] = nil
- }
-}
-
-func (wp *workerPool) Serve(c net.Conn) bool {
- ch := wp.getCh()
- if ch == nil {
- return false
- }
- ch.ch <- c
- return true
-}
-
-var workerChanCap = func() int {
- // Use blocking workerChan if GOMAXPROCS=1.
- // This immediately switches Serve to WorkerFunc, which results
- // in higher performance (under go1.5 at least).
- if runtime.GOMAXPROCS(0) == 1 {
- return 0
- }
-
- // Use non-blocking workerChan if GOMAXPROCS>1,
- // since otherwise the Serve caller (Acceptor) may lag accepting
- // new connections if WorkerFunc is CPU-bound.
- return 1
-}()
-
-func (wp *workerPool) getCh() *workerChan {
- var ch *workerChan
- createWorker := false
-
- wp.lock.Lock()
- ready := wp.ready
- n := len(ready) - 1
- if n < 0 {
- if wp.workersCount < wp.MaxWorkersCount {
- createWorker = true
- wp.workersCount++
- }
- } else {
- ch = ready[n]
- ready[n] = nil
- wp.ready = ready[:n]
- }
- wp.lock.Unlock()
-
- if ch == nil {
- if !createWorker {
- return nil
- }
- vch := wp.workerChanPool.Get()
- if vch == nil {
- vch = &workerChan{
- ch: make(chan net.Conn, workerChanCap),
- }
- }
- ch = vch.(*workerChan)
- go func() {
- wp.workerFunc(ch)
- wp.workerChanPool.Put(vch)
- }()
- }
- return ch
-}
-
-func (wp *workerPool) release(ch *workerChan) bool {
- ch.lastUseTime = time.Now()
- wp.lock.Lock()
- if wp.mustStop {
- wp.lock.Unlock()
- return false
- }
- wp.ready = append(wp.ready, ch)
- wp.lock.Unlock()
- return true
-}
-
-func (wp *workerPool) workerFunc(ch *workerChan) {
- var c net.Conn
-
- var err error
- for c = range ch.ch {
- if c == nil {
- break
- }
-
- if err = wp.WorkerFunc(c); err != nil && err != errHijacked {
- errStr := err.Error()
- if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") ||
- strings.Contains(errStr, "reset by peer") ||
- strings.Contains(errStr, "i/o timeout")) {
- wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
- }
- }
- if err != errHijacked {
- c.Close()
- }
- c = nil
-
- if !wp.release(ch) {
- break
- }
- }
-
- wp.lock.Lock()
- wp.workersCount--
- wp.lock.Unlock()
-}
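A heavily simplified standalone sketch of the FILO reuse scheme described above: released workers are pushed onto a ready stack, and the most recently released one serves the next job, keeping its CPU caches warm. The real pool additionally caps the worker count, recycles channels through sync.Pool, and evicts idle workers; all of that is omitted here.

package main

import (
	"fmt"
	"sync"
)

type worker struct{ ch chan func() }

type pool struct {
	mu    sync.Mutex
	ready []*worker // FILO stack of idle workers
}

func (p *pool) serve(job func()) {
	p.get().ch <- job
}

func (p *pool) get() *worker {
	p.mu.Lock()
	if n := len(p.ready) - 1; n >= 0 {
		w := p.ready[n] // take the most recently released worker
		p.ready = p.ready[:n]
		p.mu.Unlock()
		return w
	}
	p.mu.Unlock()
	// No idle worker is available: spawn a new one.
	w := &worker{ch: make(chan func(), 1)}
	go func() {
		for job := range w.ch {
			job()
			p.put(w) // back onto the top of the stack
		}
	}()
	return w
}

func (p *pool) put(w *worker) {
	p.mu.Lock()
	p.ready = append(p.ready, w)
	p.mu.Unlock()
}

func main() {
	var p pool
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		p.serve(func() {
			defer wg.Done()
			fmt.Println("served job", i)
		})
	}
	wg.Wait()
}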
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
deleted file mode 100644
index 3d6f516a5..000000000
--- a/vendor/golang.org/x/net/internal/socks/client.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
- "time"
-)
-
-var (
- noDeadline = time.Time{}
- aLongTimeAgo = time.Unix(1, 0)
-)
-
-func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
- host, port, err := splitHostPort(address)
- if err != nil {
- return nil, err
- }
- if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
- c.SetDeadline(deadline)
- defer c.SetDeadline(noDeadline)
- }
- if ctx != context.Background() {
- errCh := make(chan error, 1)
- done := make(chan struct{})
- defer func() {
- close(done)
- if ctxErr == nil {
- ctxErr = <-errCh
- }
- }()
- go func() {
- select {
- case <-ctx.Done():
- c.SetDeadline(aLongTimeAgo)
- errCh <- ctx.Err()
- case <-done:
- errCh <- nil
- }
- }()
- }
-
- b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
- b = append(b, Version5)
- if len(d.AuthMethods) == 0 || d.Authenticate == nil {
- b = append(b, 1, byte(AuthMethodNotRequired))
- } else {
- ams := d.AuthMethods
- if len(ams) > 255 {
- return nil, errors.New("too many authentication methods")
- }
- b = append(b, byte(len(ams)))
- for _, am := range ams {
- b = append(b, byte(am))
- }
- }
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- am := AuthMethod(b[1])
- if am == AuthMethodNoAcceptableMethods {
- return nil, errors.New("no acceptable authentication methods")
- }
- if d.Authenticate != nil {
- if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
- return
- }
- }
-
- b = b[:0]
- b = append(b, Version5, byte(d.cmd), 0)
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- b = append(b, AddrTypeIPv4)
- b = append(b, ip4...)
- } else if ip6 := ip.To16(); ip6 != nil {
- b = append(b, AddrTypeIPv6)
- b = append(b, ip6...)
- } else {
- return nil, errors.New("unknown address type")
- }
- } else {
- if len(host) > 255 {
- return nil, errors.New("FQDN too long")
- }
- b = append(b, AddrTypeFQDN)
- b = append(b, byte(len(host)))
- b = append(b, host...)
- }
- b = append(b, byte(port>>8), byte(port))
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded {
- return nil, errors.New("unknown error " + cmdErr.String())
- }
- if b[2] != 0 {
- return nil, errors.New("non-zero reserved field")
- }
- l := 2
- var a Addr
- switch b[3] {
- case AddrTypeIPv4:
- l += net.IPv4len
- a.IP = make(net.IP, net.IPv4len)
- case AddrTypeIPv6:
- l += net.IPv6len
- a.IP = make(net.IP, net.IPv6len)
- case AddrTypeFQDN:
- if _, err := io.ReadFull(c, b[:1]); err != nil {
- return nil, err
- }
- l += int(b[0])
- default:
- return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
- }
- if cap(b) < l {
- b = make([]byte, l)
- } else {
- b = b[:l]
- }
- if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
- return
- }
- if a.IP != nil {
- copy(a.IP, b)
- } else {
- a.Name = string(b[:len(b)-2])
- }
- a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
- return &a, nil
-}
-
-func splitHostPort(address string) (string, int, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return "", 0, err
- }
- portnum, err := strconv.Atoi(port)
- if err != nil {
- return "", 0, err
- }
- if 1 > portnum || portnum > 0xffff {
- return "", 0, errors.New("port number out of range " + port)
- }
- return host, portnum, nil
-}
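The bytes assembled by connect above follow RFC 1928. A small sketch of the greeting and CONNECT request for example.com:443 with no authentication, mirroring the append sequence in the deleted code:

package main

import "fmt"

func main() {
	// Greeting: VER=5, NMETHODS=1, METHODS=[0x00 (no auth required)]
	greeting := []byte{0x05, 0x01, 0x00}

	// Request: VER=5, CMD=1 (CONNECT), RSV=0, ATYP=3 (FQDN),
	// len(host), host..., port as big-endian uint16.
	host := "example.com"
	port := 443
	req := []byte{0x05, 0x01, 0x00, 0x03, byte(len(host))}
	req = append(req, host...)
	req = append(req, byte(port>>8), byte(port))

	fmt.Printf("greeting: % x\n", greeting)
	fmt.Printf("request:  % x\n", req)
}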
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
deleted file mode 100644
index 84fcc32b6..000000000
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package socks provides a SOCKS version 5 client implementation.
-//
-// SOCKS protocol version 5 is defined in RFC 1928.
-// Username/Password authentication for SOCKS version 5 is defined in
-// RFC 1929.
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
-)
-
-// A Command represents a SOCKS command.
-type Command int
-
-func (cmd Command) String() string {
- switch cmd {
- case CmdConnect:
- return "socks connect"
- case cmdBind:
- return "socks bind"
- default:
- return "socks " + strconv.Itoa(int(cmd))
- }
-}
-
-// An AuthMethod represents a SOCKS authentication method.
-type AuthMethod int
-
-// A Reply represents a SOCKS command reply code.
-type Reply int
-
-func (code Reply) String() string {
- switch code {
- case StatusSucceeded:
- return "succeeded"
- case 0x01:
- return "general SOCKS server failure"
- case 0x02:
- return "connection not allowed by ruleset"
- case 0x03:
- return "network unreachable"
- case 0x04:
- return "host unreachable"
- case 0x05:
- return "connection refused"
- case 0x06:
- return "TTL expired"
- case 0x07:
- return "command not supported"
- case 0x08:
- return "address type not supported"
- default:
- return "unknown code: " + strconv.Itoa(int(code))
- }
-}
-
-// Wire protocol constants.
-const (
- Version5 = 0x05
-
- AddrTypeIPv4 = 0x01
- AddrTypeFQDN = 0x03
- AddrTypeIPv6 = 0x04
-
- CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
- cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
-
- AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
- AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
- AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
-
- StatusSucceeded Reply = 0x00
-)
-
-// An Addr represents a SOCKS-specific address.
-// Either Name or IP is used exclusively.
-type Addr struct {
- Name string // fully-qualified domain name
- IP net.IP
- Port int
-}
-
-func (a *Addr) Network() string { return "socks" }
-
-func (a *Addr) String() string {
- if a == nil {
- return ""
- }
- port := strconv.Itoa(a.Port)
- if a.IP == nil {
- return net.JoinHostPort(a.Name, port)
- }
- return net.JoinHostPort(a.IP.String(), port)
-}
-
-// A Conn represents a forward proxy connection.
-type Conn struct {
- net.Conn
-
- boundAddr net.Addr
-}
-
-// BoundAddr returns the address assigned by the proxy server for
-// connecting to the command target address from the proxy server.
-func (c *Conn) BoundAddr() net.Addr {
- if c == nil {
- return nil
- }
- return c.boundAddr
-}
-
-// A Dialer holds SOCKS-specific options.
-type Dialer struct {
- cmd Command // either CmdConnect or cmdBind
- proxyNetwork string // network between a proxy server and a client
- proxyAddress string // proxy server address
-
- // ProxyDial specifies the optional dial function for
- // establishing the transport connection.
- ProxyDial func(context.Context, string, string) (net.Conn, error)
-
- // AuthMethods specifies the list of request authentication
- // methods.
- // If empty, SOCKS client requests only AuthMethodNotRequired.
- AuthMethods []AuthMethod
-
- // Authenticate specifies the optional authentication
- // function. It must be non-nil when AuthMethods is not empty.
- // It must return an error when the authentication fails.
- Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
-}
-
-// DialContext connects to the provided address on the provided
-// network.
-//
-// The returned error value may be a net.OpError. When the Op field of
-// net.OpError contains "socks", the Source field contains a proxy
-// server address and the Addr field contains a command target
-// address.
-//
-// See func Dial of the net package of standard library for a
-// description of the network and address parameters.
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
- } else {
- var dd net.Dialer
- c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- c.Close()
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return &Conn{Conn: c, boundAddr: a}, nil
-}
-
- // DialWithConn initiates a connection from the SOCKS server to the target
- // network and address using the connection c that is already
- // connected to the SOCKS server.
-//
-// It returns the connection's local address assigned by the SOCKS
-// server.
-func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return a, nil
-}
-
-// Dial connects to the provided address on the provided network.
-//
-// Unlike DialContext, it returns a raw transport connection instead
-// of a forward proxy connection.
-//
-// Deprecated: Use DialContext or DialWithConn instead.
-func (d *Dialer) Dial(network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
- } else {
- c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
- c.Close()
- return nil, err
- }
- return c, nil
-}
-
-func (d *Dialer) validateTarget(network, address string) error {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return errors.New("network not implemented")
- }
- switch d.cmd {
- case CmdConnect, cmdBind:
- default:
- return errors.New("command not implemented")
- }
- return nil
-}
-
-func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
- for i, s := range []string{d.proxyAddress, address} {
- host, port, err := splitHostPort(s)
- if err != nil {
- return nil, nil, err
- }
- a := &Addr{Port: port}
- a.IP = net.ParseIP(host)
- if a.IP == nil {
- a.Name = host
- }
- if i == 0 {
- proxy = a
- } else {
- dst = a
- }
- }
- return
-}
-
-// NewDialer returns a new Dialer that dials through the provided
-// proxy server's network and address.
-func NewDialer(network, address string) *Dialer {
- return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect}
-}
-
-const (
- authUsernamePasswordVersion = 0x01
- authStatusSucceeded = 0x00
-)
-
-// UsernamePassword are the credentials for the username/password
-// authentication method.
-type UsernamePassword struct {
- Username string
- Password string
-}
-
-// Authenticate authenticates a pair of username and password with the
-// proxy server.
-func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error {
- switch auth {
- case AuthMethodNotRequired:
- return nil
- case AuthMethodUsernamePassword:
- if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 {
- return errors.New("invalid username/password")
- }
- b := []byte{authUsernamePasswordVersion}
- b = append(b, byte(len(up.Username)))
- b = append(b, up.Username...)
- b = append(b, byte(len(up.Password)))
- b = append(b, up.Password...)
- // TODO(mikio): handle IO deadlines and cancelation if
- // necessary
- if _, err := rw.Write(b); err != nil {
- return err
- }
- if _, err := io.ReadFull(rw, b[:2]); err != nil {
- return err
- }
- if b[0] != authUsernamePasswordVersion {
- return errors.New("invalid username/password version")
- }
- if b[1] != authStatusSucceeded {
- return errors.New("username/password authentication failed")
- }
- return nil
- }
- return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
-}
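Authenticate above implements the RFC 1929 username/password sub-negotiation. A sketch of the exact bytes it puts on the wire for an illustrative user "alice" with password "secret":

package main

import "fmt"

func main() {
	user, pass := "alice", "secret"
	b := []byte{0x01, byte(len(user))} // VER=1, ULEN
	b = append(b, user...)
	b = append(b, byte(len(pass))) // PLEN
	b = append(b, pass...)
	fmt.Printf("% x\n", b)
	// The server replies with [VER=0x01, STATUS]; STATUS 0x00 means success,
	// which is what Authenticate checks before returning nil.
}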
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
deleted file mode 100644
index 811c2e4e9..000000000
--- a/vendor/golang.org/x/net/proxy/dial.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-// A ContextDialer dials using a context.
-type ContextDialer interface {
- DialContext(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
-//
-// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
-//
-// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
-// can leak a goroutine for as long as it takes the underlying Dialer implementation to time out.
-//
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func Dial(ctx context.Context, network, address string) (net.Conn, error) {
- d := FromEnvironment()
- if xd, ok := d.(ContextDialer); ok {
- return xd.DialContext(ctx, network, address)
- }
- return dialContext(ctx, d, network, address)
-}
-
-// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to time out.
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
- var (
- conn net.Conn
- done = make(chan struct{}, 1)
- err error
- )
- go func() {
- conn, err = d.Dial(network, address)
- close(done)
- if conn != nil && ctx.Err() != nil {
- conn.Close()
- }
- }()
- select {
- case <-ctx.Done():
- err = ctx.Err()
- case <-done:
- }
- return conn, err
-}
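A minimal usage sketch of the package-level Dial deleted above, which picks its dialer from ALL_PROXY/NO_PROXY via FromEnvironment; the target address is illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Honors ALL_PROXY/NO_PROXY; falls back to dialContext for dialers
	// that do not implement ContextDialer.
	c, err := proxy.Dial(ctx, "tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer c.Close()
	fmt.Println("connected via", c.RemoteAddr())
}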
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
deleted file mode 100644
index 3d66bdef9..000000000
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-type direct struct{}
-
-// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
-var Direct = direct{}
-
-var (
- _ Dialer = Direct
- _ ContextDialer = Direct
-)
-
-// Dial directly invokes net.Dial with the supplied parameters.
-func (direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters.
-func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
- var d net.Dialer
- return d.DialContext(ctx, network, addr)
-}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
deleted file mode 100644
index 573fe79e8..000000000
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
- "strings"
-)
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type PerHost struct {
- def, bypass Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
- return &PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-// DialContext connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- d := p.dialerForRequest(host)
- if x, ok := d.(ContextDialer); ok {
- return x.DialContext(ctx, network, addr)
- }
- return dialContext(ctx, d, network, addr)
-}
-
-func (p *PerHost) dialerForRequest(host string) Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
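A usage sketch of the PerHost rules deleted above: hosts matching the bypass list are dialed directly instead of through the SOCKS proxy. The addresses are illustrative.

package main

import (
	"golang.org/x/net/proxy"
)

func main() {
	socksDialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		panic(err)
	}
	ph := proxy.NewPerHost(socksDialer, proxy.Direct)
	// IPs, CIDR ranges, zones and plain host names, comma-separated,
	// exactly as AddFromString documents:
	ph.AddFromString("10.0.0.0/8,localhost,*.internal.example.com")

	if c, err := ph.Dial("tcp", "db.internal.example.com:5432"); err == nil {
		c.Close() // matched the *.internal.example.com zone: dialed directly
	}
}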
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
deleted file mode 100644
index 9ff4b9a77..000000000
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-package proxy // import "golang.org/x/net/proxy"
-
-import (
- "errors"
- "net"
- "net/url"
- "os"
- "sync"
-)
-
-// A Dialer is a means to establish a connection.
-// Custom dialers should also implement ContextDialer.
-type Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy-related
-// variables in the environment and makes underlying connections
-// directly.
-func FromEnvironment() Dialer {
- return FromEnvironmentUsing(Direct)
-}
-
-// FromEnvironmentUsing returns the dialer specified by the proxy-related
-// variables in the environment and makes underlying connections
-// using the provided forwarding Dialer (for instance, a *net.Dialer
-// with desired configuration).
-func FromEnvironmentUsing(forward Dialer) Dialer {
- allProxy := allProxyEnv.Get()
- if len(allProxy) == 0 {
- return forward
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return forward
- }
- proxy, err := FromURL(proxyURL, forward)
- if err != nil {
- return forward
- }
-
- noProxy := noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := NewPerHost(proxy, forward)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
- if proxySchemes == nil {
- proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
- }
- proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
- var auth *Auth
- if u.User != nil {
- auth = new(Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5", "socks5h":
- addr := u.Hostname()
- port := u.Port()
- if port == "" {
- port = "1080"
- }
- return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxySchemes != nil {
- if f, ok := proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- allProxyEnv = &envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- noProxyEnv = &envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// reset is used by tests
-func (e *envOnce) reset() {
- e.once = sync.Once{}
- e.val = ""
-}
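A sketch of the environment-driven selection implemented by FromEnvironment above: with ALL_PROXY set to a socks5 URL and NO_PROXY non-empty it returns a *PerHost wrapping the SOCKS dialer, otherwise it falls back to Direct. Note that envOnce caches the lookups, so the variables must be set before the first call.

package main

import (
	"fmt"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080")
	os.Setenv("NO_PROXY", "localhost")

	d := proxy.FromEnvironment()
	fmt.Printf("%T\n", d) // *proxy.PerHost under the settings above

	if c, err := d.Dial("tcp", "example.com:80"); err == nil {
		c.Close()
	}
}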
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
deleted file mode 100644
index c91651f96..000000000
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-
- "golang.org/x/net/internal/socks"
-)
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given
-// address with an optional username and password.
-// See RFC 1928 and RFC 1929.
-func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
- d := socks.NewDialer(network, address)
- if forward != nil {
- if f, ok := forward.(ContextDialer); ok {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return f.DialContext(ctx, network, address)
- }
- } else {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return dialContext(ctx, forward, network, address)
- }
- }
- }
- if auth != nil {
- up := socks.UsernamePassword{
- Username: auth.User,
- Password: auth.Password,
- }
- d.AuthMethods = []socks.AuthMethod{
- socks.AuthMethodNotRequired,
- socks.AuthMethodUsernamePassword,
- }
- d.Authenticate = up.Authenticate
- }
- return d, nil
-}
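A usage sketch for the SOCKS5 constructor deleted above, with username/password authentication. The proxy address is illustrative, and the Dial will fail unless a SOCKS5 server is actually listening there.

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	auth := &proxy.Auth{User: "alice", Password: "secret"}
	d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	c, err := d.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	log.Println("tunneled via SOCKS5:", c.RemoteAddr())
}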
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 74a2a9298..a28aaf42c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -99,11 +99,6 @@ github.com/VictoriaMetrics/easyproto
# github.com/VictoriaMetrics/fastcache v1.12.2
## explicit; go 1.13
github.com/VictoriaMetrics/fastcache
-# github.com/VictoriaMetrics/fasthttp v1.2.0
-## explicit; go 1.19
-github.com/VictoriaMetrics/fasthttp
-github.com/VictoriaMetrics/fasthttp/fasthttputil
-github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.31.0
## explicit; go 1.17
github.com/VictoriaMetrics/metrics
@@ -674,9 +669,7 @@ golang.org/x/net/http/httpproxy
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
-golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
-golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.16.0
## explicit; go 1.18