Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)
vendor: downgrade github.com/valyala/fasthttp from v1.12.0 to v1.2.0

v1.2.0 points to the last changes verified by me.
I'm afraid that releases after v1.2.0 may contain completely broken changes like 996610f021.
parent 57407cca83
commit e3cc329d85
32 changed files with 720 additions and 1939 deletions
go.mod (5 changes)

@@ -11,7 +11,10 @@ require (
 github.com/golang/protobuf v1.4.0 // indirect
 github.com/golang/snappy v0.0.1
 github.com/klauspost/compress v1.10.5
-github.com/valyala/fasthttp v1.12.0
+
+// do not update fasthttp releases because of issues like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
+// in the new code.
+github.com/valyala/fasthttp v1.2.0
 github.com/valyala/fastjson v1.5.1
 github.com/valyala/fastrand v1.0.0
 github.com/valyala/gozstd v1.7.0
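For context, a pinned downgrade like the one above is normally applied with the standard Go modules workflow and the refreshed vendor tree committed alongside go.mod/go.sum (commands assumed for illustration, not recorded in this commit):

```
go get github.com/valyala/fasthttp@v1.2.0
go mod vendor
```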
go.sum (5 changes)

@@ -121,8 +121,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=

@@ -145,9 +143,8 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/fasthttp v1.12.0 h1:TsB9qkSeiMXB40ELWWSRMjlsE+8IkqXHcs01y2d9aw0=
github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E=
github.com/valyala/fastjson v1.5.1 h1:SXaQZVSwLjZOVhDEhjiCcDtnX0Feu7Z7A1+C5atpoHM=
github.com/valyala/fastjson v1.5.1/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
vendor/github.com/valyala/fasthttp/.gitignore (1 change, generated, vendored)

@@ -1,4 +1,3 @@
tags
*.pprof
*.fasthttp.gz
.idea
vendor/github.com/valyala/fasthttp/.travis.yml (58 changes, generated, vendored)

@@ -1,56 +1,36 @@
language: go
|
||||
|
||||
# Docker is required for fuzzit regression tests
|
||||
services:
|
||||
- docker
|
||||
|
||||
dist: bionic
|
||||
go:
|
||||
- tip
|
||||
- 1.11.x
|
||||
- 1.10.x
|
||||
- 1.9.x
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
go:
|
||||
- tip
|
||||
- 1.14.x
|
||||
- 1.13.x
|
||||
- 1.12.x
|
||||
- 1.11.x
|
||||
- 1.10.x
|
||||
- 1.9.x
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- tip
|
||||
fast_finish: true
|
||||
|
||||
env:
|
||||
global:
|
||||
secure: "v/F0oI9zE9mcpEp4AVdHzSSHbe5ZFtH6B0i/BiUXKdQRQ10+JMPDOFRJQti7yxjMwltyd/QSFmR50Fl108sQYpo4xdlEXMHp2Y6OAN6crrp6PuHbLYgDWu3df/cH7/BqDyIq1uX8KZEeQssnygYN8hN4tpJCUg+NIb40Lm57Zsodt8DVjjyDWQQFDL7soNyAwGwQIqEyJsn+NUieXWEB1Qnt0xUtPIReuLlrwXR8wC1nLEjG9yz4ftDHHQdhVbO2b+xGWyaJ7QB5ixztaQP8Jnny6kSW9j6zEhJVuzdZ6d3xz23ibCbzSXBHdIUEI9u6ifQj8BYXr8fFS0FB3++IxgAYSs3ybZ+qEwuAxSBBm6YNW+3FrfDknVwTQscjKqnXPisjUqaRC9b31hke0tXzBq1488hE+wxMXeDM4LwWT5IMEO2gz0WGQXxmdVit72DIjCZxJkf1TvZZ0YH7Y//6wJTYYP9xulsy4gqu8CuFdWiF3fiGc3p5DTIS75nJ/Yy76Sa1pRPASKCujfLxtHE6Mt0XKvSolIXklYIzBkjN6vn80N6JIrqtqlimBGPW/Ec6+dwbmRe2AcOKRl4y7pZsGYhJhqdue1mucUYO/e2QeBZJGkqqG+zF5AW0v8x29BHvMwViAonc8o9eelkJ8khYzc/Qeq05pZnR/N/Pqfc+68k="
|
||||
|
||||
before_install:
|
||||
- go get -t -v ./...
|
||||
# - go get -v golang.org/x/tools/cmd/goimports
|
||||
|
||||
jobs:
|
||||
include:
|
||||
- stage: test
|
||||
script:
|
||||
# build test for supported platforms
|
||||
- GOOS=linux go build
|
||||
- GOOS=darwin go build
|
||||
- GOOS=freebsd go build
|
||||
- GOOS=windows go build
|
||||
- GOARCH=386 go build
|
||||
script:
|
||||
# TODO(@kirilldanshin)
|
||||
# - test -z "$(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*"))"
|
||||
# build test for supported platforms
|
||||
- GOOS=linux go build
|
||||
- GOOS=darwin go build
|
||||
- GOOS=freebsd go build
|
||||
- GOOS=windows go build
|
||||
- GOARCH=386 go build
|
||||
|
||||
# run tests on a standard platform
|
||||
- go test -v ./...
|
||||
# run tests on a standard platform
|
||||
- go test -v ./...
|
||||
|
||||
# run tests with the race detector as well
|
||||
- go test -race -v ./...
|
||||
- stage: fuzzit.dev
|
||||
os:
|
||||
- linux
|
||||
go:
|
||||
- 1.14
|
||||
script:
|
||||
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then ./fuzzit.sh fuzzing; fi
|
||||
- if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then ./fuzzit.sh local-regression; fi
|
||||
# run tests with the race detector as well
|
||||
- go test -race -v ./...
|
||||
|
|
vendor/github.com/valyala/fasthttp/LICENSE (24 changes, generated, vendored)

@@ -1,9 +1,25 @@
The MIT License (MIT)

Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia, Kirill Danshin, Erik Dubbelboer, FastHTTP Authors
Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia
Copyright (c) 2018-present Kirill Danshin
Copyright (c) 2018-present Erik Dubbelboer
Copyright (c) 2018-present FastHTTP Authors

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/valyala/fasthttp/README.md (25 changes, generated, vendored)

@@ -1,14 +1,15 @@
# fasthttp [![Build Status](https://travis-ci.org/valyala/fasthttp.svg?branch=master)](https://travis-ci.org/valyala/fasthttp?branch=master) [![GoDoc](https://godoc.org/github.com/valyala/fasthttp?status.svg)](http://godoc.org/github.com/valyala/fasthttp) [![fuzzit](https://app.fuzzit.dev/badge?org_id=fasthttp&branch=master)](https://fuzzit.dev) [![Go Report](https://goreportcard.com/badge/github.com/valyala/fasthttp)](https://goreportcard.com/report/github.com/valyala/fasthttp) [![Sourcegraph](https://sourcegraph.com/github.com/valyala/fasthttp/-/badge.svg)](https://sourcegraph.com/github.com/valyala/fasthttp?badge)

![FastHTTP – Fastest and reliable HTTP implementation in Go](https://github.com/fasthttp/docs-assets/raw/master/banner@0.5.png)
[![Build Status](https://travis-ci.org/valyala/fasthttp.svg)](https://travis-ci.org/valyala/fasthttp)
[![GoDoc](https://godoc.org/github.com/valyala/fasthttp?status.svg)](http://godoc.org/github.com/valyala/fasthttp)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/fasthttp)](https://goreportcard.com/report/github.com/valyala/fasthttp)

# fasthttp
Fast HTTP implementation for Go.

Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/)
in a production serving up to 200K rps from more than 1.5M concurrent keep-alive
connections per physical server.

[TechEmpower Benchmark round 18 results](https://www.techempower.com/benchmarks/#section=data-r18&hw=ph&test=plaintext)
[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext)

[Server Benchmarks](#http-server-performance-comparison-with-nethttp)

@@ -169,7 +170,7 @@ go get -u github.com/valyala/fasthttp
Unfortunately, fasthttp doesn't provide API identical to net/http.
See the [FAQ](#faq) for details.
There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor),
but it is better to write fasthttp request handlers by hand in order to use
but it is better to write fasthttp request handlers by hand in order to use
all of the fasthttp advantages (especially high performance :) ).

Important points:

@@ -280,7 +281,6 @@ with fasthttp support:
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
* [lu](https://github.com/vincentLiuxiang/lu)
* [atreugo](https://github.com/savsgio/atreugo)
* [Fiber](https://github.com/gofiber/fiber)

Net/http code with simple ServeMux is trivially converted to fasthttp code:

@@ -399,7 +399,7 @@ instead of [html/template](https://golang.org/pkg/html/template/).
* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset).
* Ensure the interrupts of multiqueue network card are evenly distributed between CPU cores.
See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details.
* Use Go 1.13 as it provides some considerable performance improvements.
* Use Go 1.6 as it provides some considerable performance improvements.

# Fasthttp best practices

@@ -486,18 +486,14 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
powerful routing package for fasthttp servers.
* [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high
performance fasthttp request router that scales well.
* [fastws](https://github.com/fasthttp/fastws) - Bloatless WebSocket package made for fasthttp
to handle Read/Write operations concurrently.
* [gramework](https://github.com/gramework/gramework) - a web framework made by one of fasthttp maintainers
* [lu](https://github.com/vincentLiuxiang/lu) - a high performance
go middleware web framework which is based on fasthttp.
* [websocket](https://github.com/fasthttp/websocket) - Gorilla-based
websocket implementation for fasthttp.
* [fasthttpsession](https://github.com/phachon/fasthttpsession) - a fast and powerful session package for fasthttp servers.
* [atreugo](https://github.com/savsgio/atreugo) - High performance and extensible micro web framework with zero memory allocations in hot paths.
* [atreugo](https://github.com/savsgio/atreugo) - Micro-framework to make simple the use of routing and middlewares.
* [kratgo](https://github.com/savsgio/kratgo) - Simple, lightweight and ultra-fast HTTP Cache to speed up your websites.
* [kit-plugins](https://github.com/wencan/kit-plugins/tree/master/transport/fasthttp) - go-kit transport implementation for fasthttp.
* [Fiber](https://github.com/gofiber/fiber) - An Expressjs inspired web framework running on Fasthttp

# FAQ

@@ -527,7 +523,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234)

* *Why fasthttp doesn't support HTTP/2.0 and WebSockets?*

[HTTP/2.0 support](https://github.com/fasthttp/http2) is in progress. [WebSockets](https://github.com/fasthttp/websockets) has been done already.
[HTTP/2.0 support](https://github.com/fasthttp/http2) is in progress. [WebSockets](https://github.com/fasthttp/websockets) has been done already.
Third parties also may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
for implementing these goodies.

@@ -552,7 +548,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234)

Go1.5+. Older versions won't be supported, since their standard package
[miss useful functions](https://github.com/valyala/fasthttp/issues/5).

**NOTE**: Go 1.9.7 is the oldest tested version. We recommend you to update as soon as you can. As of 1.11.3 we will drop 1.9.x support.

* *Please provide real benchmark data and server information*

@@ -569,7 +565,6 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
* [gramework](https://github.com/gramework/gramework)
* [lu](https://github.com/vincentLiuxiang/lu)
* [atreugo](https://github.com/savsgio/atreugo)
* [Fiber](https://github.com/gofiber/fiber)

See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
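For readers following the README excerpt above: the fasthttpadaptor package wraps existing net/http handlers, while a hand-written handler uses *fasthttp.RequestCtx directly, as the README recommends. A minimal sketch of the hand-written style (illustrative only, not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

// requestHandler is a hand-written fasthttp handler; RequestCtx replaces the
// (http.ResponseWriter, *http.Request) pair used by net/http and implements
// io.Writer for the response body.
func requestHandler(ctx *fasthttp.RequestCtx) {
	fmt.Fprintf(ctx, "Hello, %q", ctx.Path())
}

func main() {
	// ListenAndServe blocks, serving requestHandler for every request.
	if err := fasthttp.ListenAndServe(":8080", requestHandler); err != nil {
		log.Fatalf("server error: %v", err)
	}
}
```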
vendor/github.com/valyala/fasthttp/SECURITY.md (115 changes, generated, vendored)

@@ -1,115 +0,0 @@
### TL;DR
|
||||
|
||||
We use a simplified version of [Golang Security Policy](https://golang.org/security).
|
||||
For example, for now we skip CVE assignment.
|
||||
|
||||
### Reporting a Security Bug
|
||||
|
||||
Please report to us any issues you find. This document explains how to do that and what to expect in return.
|
||||
|
||||
All security bugs in our releases should be reported by email to oss-security@highload.solutions.
|
||||
This mail is delivered to a small security team.
|
||||
Your email will be acknowledged within 24 hours, and you'll receive a more detailed response
|
||||
to your email within 72 hours indicating the next steps in handling your report.
|
||||
For critical problems, you can encrypt your report using our PGP key (listed below).
|
||||
|
||||
Please use a descriptive subject line for your report email.
|
||||
After the initial reply to your report, the security team will
|
||||
endeavor to keep you informed of the progress being made towards a fix and full announcement.
|
||||
These updates will be sent at least every five days.
|
||||
In reality, this is more likely to be every 24-48 hours.
|
||||
|
||||
If you have not received a reply to your email within 48 hours or you have not heard from the security
|
||||
team for the past five days please contact us by email to developers@highload.solutions or by Telegram message
|
||||
to [our support](https://t.me/highload_support).
|
||||
Please note that developers@highload.solutions list includes all developers, who may be outside our opensource security team.
|
||||
When escalating on this list, please do not disclose the details of the issue.
|
||||
Simply state that you're trying to reach a member of the security team.
|
||||
|
||||
### Flagging Existing Issues as Security-related
|
||||
|
||||
If you believe that an existing issue is security-related, we ask that you send an email to oss-security@highload.solutions.
|
||||
The email should include the issue ID and a short description of why it should be handled according to this security policy.
|
||||
|
||||
### Disclosure Process
|
||||
|
||||
Our project uses the following disclosure process:
|
||||
|
||||
- Once the security report is received it is assigned a primary handler. This person coordinates the fix and release process.
|
||||
- The issue is confirmed and a list of affected software is determined.
|
||||
- Code is audited to find any potential similar problems.
|
||||
- Fixes are prepared for the two most recent major releases and the head/master revision. These fixes are not yet committed to the public repository.
|
||||
- To notify users, a new issue without security details is submitted to our GitHub repository.
|
||||
- Three working days following this notification, the fixes are applied to the public repository and a new release is issued.
|
||||
- On the date that the fixes are applied, announcement is published in the issue.
|
||||
|
||||
This process can take some time, especially when coordination is required with maintainers of other projects.
|
||||
Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow
|
||||
the process described above to ensure that disclosures are handled consistently.
|
||||
|
||||
### Receiving Security Updates
|
||||
The best way to receive security announcements is to subscribe ("Watch") to our repository.
|
||||
Any GitHub issues pertaining to a security issue will be prefixed with [security].
|
||||
|
||||
### Comments on This Policy
|
||||
If you have any suggestions to improve this policy, please send an email to oss-security@highload.solutions for discussion.
|
||||
|
||||
### PGP Key for oss-security@highload.ltd
|
||||
|
||||
We accept PGP-encrypted email, but the majority of the security team are not regular PGP users
|
||||
so it's somewhat inconvenient. Please only use PGP for critical security reports.
|
||||
|
||||
```
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mQINBFzdjYUBEACa3YN+QVSlnXofUjxr+YrmIaF+da0IUq+TRM4aqUXALsemEdGh
|
||||
yIl7Z6qOOy1d2kPe6t//H9l/92lJ1X7i6aEBK4n/pnPZkwbpy9gGpebgvTZFvcbe
|
||||
mFhF6k1FM35D8TxneJSjizPyGhJPqcr5qccqf8R64TlQx5Ud1JqT2l8P1C5N7gNS
|
||||
lEYXq1h4zBCvTWk1wdeLRRPx7Bn6xrgmyu/k61dLoJDvpvWNATVFDA67oTrPgzTW
|
||||
xtLbbk/xm0mK4a8zMzIpNyz1WkaJW9+4HFXaL+yKlsx7iHe2O7VlGoqS0kdeQup4
|
||||
1HIw/P7yc0jBlNMLUzpuA6ElYUwESWsnCI71YY1x4rKgI+GqH1mWwgn7tteuXQtb
|
||||
Zj0vEdjK3IKIOSbzbzAvSbDt8F1+o7EMtdy1eUysjKSQgFkDlT6JRmYvEup5/IoG
|
||||
iknh/InQq9RmGFKii6pXWWoltC0ebfCwYOXvymyDdr/hYDqJeHS9Tenpy86Doaaf
|
||||
HGf5nIFAMB2G5ctNpBwzNXR2MAWkeHQgdr5a1xmog0hS125usjnUTet3QeCyo4kd
|
||||
gVouoOroMcqFFUXdYaMH4c3KWz0afhTmIaAsFFOv/eMdadVA4QyExTJf3TAoQ+kH
|
||||
lKDlbOAIxEZWRPDFxMRixaVPQC+VxhBcaQ+yNoaUkM0V2m8u8sDBpzi1OQARAQAB
|
||||
tDxPU1MgU2VjdXJpdHksIEhpZ2hsb2FkIExURCA8b3NzLXNlY3VyaXR5QGhpZ2hs
|
||||
b2FkLnNvbHV0aW9ucz6JAlQEEwEIAD4WIQRljYp380uKq2g8TeqsQcvu+Qp2TAUC
|
||||
XN2NhQIbAwUJB4YfgAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCsQcvu+Qp2
|
||||
TKmED/96YoQoOjD28blFFrigvAsiNcNNZoX9I0dX1lNpD83fBJf+/9i+x4jqUnI5
|
||||
5XK/DFTDbhpw8kQBpxS9eEuIYnuo0RdLLp1ctNWTlpwfyHn92mGddl/uBdYHUuUk
|
||||
cjhIQcFaCcWRY+EpamDlv1wmZ83IwBr8Hu5FS+/Msyw1TBvtTRVKW1KoGYMYoXLk
|
||||
BzIglRPwn821B6s4BvK/RJnZkrmHMBZBfYMf+iSMSYd2yPmfT8wbcAjgjLfQa28U
|
||||
gbt4u9xslgKjuM83IqwFfEXBnm7su3OouGWqc+62mQTsbnK65zRFnx6GXRXC1BAi
|
||||
6m9Tm1PU0IiINz66ainquspkXYeHjd9hTwfR3BdFnzBTRRM01cKMFabWbLj8j0p8
|
||||
fF4g9cxEdiLrzEF7Yz4WY0mI4Cpw4eJZfsHMc07Jn7QxfJhIoq+rqBOtEmTjnxMh
|
||||
aWeykoXMHlZN4K0ZrAytozVH1D4bugWA9Zuzi9U3F9hrVVABm11yyhd2iSqI6/FR
|
||||
GcCFOCBW1kEJbzoEguub+BV8LDi8ldljHalvur5k/VFhoDBxniYNsKmiCLVCmDWs
|
||||
/nF84hCReAOJt0vDGwqHe3E2BFFPbKwdJLRNkjxBY0c/pvaV+JxbWQmaxDZNeIFV
|
||||
hFcVGp48HNY3qLWZdsQIfT9m1masJFLVuq8Wx7bYs8Et5eFnH7kCDQRc3Y2FARAA
|
||||
2DJWAxABydyIdCxgFNdqnYyWS46vh2DmLmRMqgasNlD0ozG4S9bszBsgnUI2Xs06
|
||||
J76kFRh8MMHcu9I4lUKCQzfrA4uHkiOK5wvNCaWP+C6JUYNHsqPwk/ILO3gtQ/Ws
|
||||
LLf/PW3rJZVOZB+WY8iaYc20l5vukTaVw4qbEi9dtLkJvVpNHt//+jayXU6s3ew1
|
||||
2X5xdwyAZxaxlnzFaY/Xo/qR+bZhVFC0T9pAECnHv9TVhFGp0JE9ipPGnro5xTIS
|
||||
LttdAkzv4AuSVTIgWgTkh8nN8t7STJqfPEv0I12nmmYHMUyTYOurkfskF3jY2x6x
|
||||
8l02NQ4d5KdC3ReV1j51swrGcZCwsWNp51jnEXKwo+B0NM5OmoRrNJgF2iDgLehs
|
||||
hP00ljU7cB8/1/7kdHZStYaUHICFOFqHzg415FlYm+jpY0nJp/b9BAO0d0/WYnEe
|
||||
Xjihw8EVBAqzEt4kay1BQonZAypeYnGBJr7vNvdiP+mnRwly5qZSGiInxGvtZZFt
|
||||
zL1E3osiF+muQxFcM63BeGdJeYXy+MoczkWa4WNggfcHlGAZkMYiv28zpr4PfrK9
|
||||
mvj4Nu8s71PE9pPpBoZcNDf9v1sHuu96jDSITsPx5YMvvKZWhzJXFKzk6YgAsNH/
|
||||
MF0G+/qmKJZpCdvtHKpYM1uHX85H81CwWJFfBPthyD8AEQEAAYkCPAQYAQgAJhYh
|
||||
BGWNinfzS4qraDxN6qxBy+75CnZMBQJc3Y2FAhsMBQkHhh+AAAoJEKxBy+75CnZM
|
||||
Rn8P/RyL1bhU4Q4WpvmlkepCAwNA0G3QvnKcSZNHEPE5h7H3IyrA/qy16A9eOsgm
|
||||
sthsHYlo5A5lRIy4wPHkFCClMrMHdKuoS72//qgw+oOrBcwb7Te+Nas+ewhaJ7N9
|
||||
vAX06vDH9bLl52CPbtats5+eBpePgP3HDPxd7CWHxq9bzJTbzqsTkN7JvoovR2dP
|
||||
itPJDij7QYLYVEM1t7QxUVpVwAjDi/kCtC9ts5L+V0snF2n3bHZvu04EXdpvxOQI
|
||||
pG/7Q+/WoI8NU6Bb/FA3tJGYIhSwI3SY+5XV/TAZttZaYSh2SD8vhc+eo+gW9sAN
|
||||
xa+VESBQCht9+tKIwEwHs1efoRgFdbwwJ2c+33+XydQ6yjdXoX1mn2uyCr82jorZ
|
||||
xTzbkY04zr7oZ+0fLpouOFg/mrSL4w2bWEhdHuyoVthLBjnRme0wXCaS3g3mYdLG
|
||||
nSUkogOGOOvvvBtoq/vfx0Eu79piUtw5D8yQSrxLDuz8GxCrVRZ0tYIHb26aTE9G
|
||||
cDsW/Lg5PjcY/LgVNEWOxDQDFVurlImnlVJFb3q+NrWvPbgeIEWwJDCay/z25SEH
|
||||
k3bSOXLp8YGRnlkWUmoeL4g/CCK52iAAlfscZNoKMILhBnbCoD657jpa5GQKJj/U
|
||||
Q8kjgr7kwV/RSosNV9HCPj30mVyiCQ1xg+ZLzMKXVCuBWd+G
|
||||
=lnt2
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
```
|
vendor/github.com/valyala/fasthttp/args.go (2 changes, generated, vendored)

@@ -44,7 +44,7 @@ var argsPool = &sync.Pool{
//
// Args instance MUST NOT be used from concurrently running goroutines.
type Args struct {
noCopy noCopy //nolint:unused,structcheck
noCopy noCopy

args []argsKV
buf []byte
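The noCopy field touched in this hunk is the usual `go vet` (copylocks) trick for catching accidental copies of a struct after first use; a minimal sketch of the idiom for context (names mirror the fasthttp pattern, not taken verbatim from this diff):

```go
package argsdemo

// noCopy is a zero-size type embedded into structs that must not be copied
// after first use; the Lock/Unlock methods make the embedding type look like
// a sync.Locker, so `go vet` (copylocks check) reports copies of it.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// argsKV is a stand-in for fasthttp's internal key/value pair type.
type argsKV struct {
	key, value []byte
}

// Args mirrors the fasthttp pattern: embedding noCopy costs no memory but
// lets vet flag statements like `b := *a` that would share internal slices.
type Args struct {
	noCopy noCopy //nolint:unused,structcheck

	args []argsKV
	buf  []byte
}
```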
vendor/github.com/valyala/fasthttp/bytesconv.go (117 changes, generated, vendored)

@@ -1,5 +1,3 @@
//go:generate go run bytesconv_table_gen.go
|
||||
|
||||
package fasthttp
|
||||
|
||||
import (
|
||||
|
@ -273,9 +271,7 @@ func readHexInt(r *bufio.Reader) (int, error) {
|
|||
if i == 0 {
|
||||
return -1, errEmptyHexNum
|
||||
}
|
||||
if err := r.UnreadByte(); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
r.UnreadByte()
|
||||
return n, nil
|
||||
}
|
||||
if i >= maxHexIntChars {
|
||||
|
@ -300,7 +296,7 @@ func writeHexInt(w *bufio.Writer, n int) error {
|
|||
buf := v.([]byte)
|
||||
i := len(buf) - 1
|
||||
for {
|
||||
buf[i] = lowerhex[n&0xf]
|
||||
buf[i] = int2hexbyte(n & 0xf)
|
||||
n >>= 4
|
||||
if n == 0 {
|
||||
break
|
||||
|
@ -312,10 +308,61 @@ func writeHexInt(w *bufio.Writer, n int) error {
|
|||
return err
|
||||
}
|
||||
|
||||
const (
|
||||
upperhex = "0123456789ABCDEF"
|
||||
lowerhex = "0123456789abcdef"
|
||||
)
|
||||
func int2hexbyte(n int) byte {
|
||||
if n < 10 {
|
||||
return '0' + byte(n)
|
||||
}
|
||||
return 'a' + byte(n) - 10
|
||||
}
|
||||
|
||||
func hexCharUpper(c byte) byte {
|
||||
if c < 10 {
|
||||
return '0' + c
|
||||
}
|
||||
return c - 10 + 'A'
|
||||
}
|
||||
|
||||
var hex2intTable = func() []byte {
|
||||
b := make([]byte, 256)
|
||||
for i := 0; i < 256; i++ {
|
||||
c := byte(16)
|
||||
if i >= '0' && i <= '9' {
|
||||
c = byte(i) - '0'
|
||||
} else if i >= 'a' && i <= 'f' {
|
||||
c = byte(i) - 'a' + 10
|
||||
} else if i >= 'A' && i <= 'F' {
|
||||
c = byte(i) - 'A' + 10
|
||||
}
|
||||
b[i] = c
|
||||
}
|
||||
return b
|
||||
}()
|
||||
|
||||
const toLower = 'a' - 'A'
|
||||
|
||||
var toLowerTable = func() [256]byte {
|
||||
var a [256]byte
|
||||
for i := 0; i < 256; i++ {
|
||||
c := byte(i)
|
||||
if c >= 'A' && c <= 'Z' {
|
||||
c += toLower
|
||||
}
|
||||
a[i] = c
|
||||
}
|
||||
return a
|
||||
}()
|
||||
|
||||
var toUpperTable = func() [256]byte {
|
||||
var a [256]byte
|
||||
for i := 0; i < 256; i++ {
|
||||
c := byte(i)
|
||||
if c >= 'a' && c <= 'z' {
|
||||
c -= toLower
|
||||
}
|
||||
a[i] = c
|
||||
}
|
||||
return a
|
||||
}()
|
||||
|
||||
func lowercaseBytes(b []byte) {
|
||||
for i := 0; i < len(b); i++ {
|
||||
|
@ -330,7 +377,6 @@ func lowercaseBytes(b []byte) {
|
|||
// Note it may break if string and/or slice header will change
|
||||
// in the future go versions.
|
||||
func b2s(b []byte) string {
|
||||
/* #nosec G103 */
|
||||
return *(*string)(unsafe.Pointer(&b))
|
||||
}
|
||||
|
||||
|
@ -338,15 +384,14 @@ func b2s(b []byte) string {
|
|||
//
|
||||
// Note it may break if string and/or slice header will change
|
||||
// in the future go versions.
|
||||
func s2b(s string) (b []byte) {
|
||||
/* #nosec G103 */
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
/* #nosec G103 */
|
||||
sh := *(*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
bh.Data = sh.Data
|
||||
bh.Len = sh.Len
|
||||
bh.Cap = sh.Len
|
||||
return b
|
||||
func s2b(s string) []byte {
|
||||
sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
bh := reflect.SliceHeader{
|
||||
Data: sh.Data,
|
||||
Len: sh.Len,
|
||||
Cap: sh.Len,
|
||||
}
|
||||
return *(*[]byte)(unsafe.Pointer(&bh))
|
||||
}
|
||||
|
||||
// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
|
||||
|
@ -359,29 +404,33 @@ func AppendUnquotedArg(dst, src []byte) []byte {
|
|||
// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
|
||||
func AppendQuotedArg(dst, src []byte) []byte {
|
||||
for _, c := range src {
|
||||
switch {
|
||||
case c == ' ':
|
||||
dst = append(dst, '+')
|
||||
case quotedArgShouldEscapeTable[int(c)] != 0:
|
||||
dst = append(dst, '%', upperhex[c>>4], upperhex[c&0xf])
|
||||
default:
|
||||
// See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm
|
||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
|
||||
c == '*' || c == '-' || c == '.' || c == '_' {
|
||||
dst = append(dst, c)
|
||||
} else {
|
||||
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func appendQuotedPath(dst, src []byte) []byte {
|
||||
// Fix issue in https://github.com/golang/go/issues/11202
|
||||
if len(src) == 1 && src[0] == '*' {
|
||||
return append(dst, '*')
|
||||
}
|
||||
|
||||
for _, c := range src {
|
||||
if quotedPathShouldEscapeTable[int(c)] != 0 {
|
||||
dst = append(dst, '%', upperhex[c>>4], upperhex[c&15])
|
||||
} else {
|
||||
// From the spec: http://tools.ietf.org/html/rfc3986#section-3.3
|
||||
// an path can contain zero or more of pchar that is defined as follows:
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
|
||||
c == '-' || c == '.' || c == '_' || c == '~' || c == '!' || c == '$' ||
|
||||
c == '&' || c == '\'' || c == '(' || c == ')' || c == '*' || c == '+' ||
|
||||
c == ',' || c == ';' || c == '=' || c == ':' || c == '@' || c == '/' {
|
||||
dst = append(dst, c)
|
||||
} else {
|
||||
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
|
|
vendor/github.com/valyala/fasthttp/bytesconv_table.go (10 changes, generated, vendored)

@@ -1,10 +0,0 @@
package fasthttp
|
||||
|
||||
// Code generated by go run bytesconv_table_gen.go; DO NOT EDIT.
|
||||
// See bytesconv_table_gen.go for more information about these tables.
|
||||
|
||||
const hex2intTable = "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x00\x01\x02\x03\x04\x05\x06\a\b\t\x10\x10\x10\x10\x10\x10\x10\n\v\f\r\x0e\x0f\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\n\v\f\r\x0e\x0f\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"
|
||||
const toLowerTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@abcdefghijklmnopqrstuvwxyz[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
|
||||
const toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
|
||||
const quotedArgShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01"
|
||||
const quotedPathShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x00\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01"
|
vendor/github.com/valyala/fasthttp/client.go (604 changes, generated, vendored)

@@ -8,7 +8,6 @@ import (
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
@ -97,30 +96,6 @@ func DoDeadline(req *Request, resp *Response, deadline time.Time) error {
|
|||
return defaultClient.DoDeadline(req, resp, deadline)
|
||||
}
|
||||
|
||||
// DoRedirects performs the given http request and fills the given http response,
|
||||
// following up to maxRedirectsCount redirects. When the redirect count exceeds
|
||||
// maxRedirectsCount, ErrTooManyRedirects is returned.
|
||||
//
|
||||
// Request must contain at least non-zero RequestURI with full url (including
|
||||
// scheme and host) or non-zero Host header + RequestURI.
|
||||
//
|
||||
// Client determines the server to be requested in the following order:
|
||||
//
|
||||
// - from RequestURI if it contains full url with scheme and host;
|
||||
// - from Host header otherwise.
|
||||
//
|
||||
// Response is ignored if resp is nil.
|
||||
//
|
||||
// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
|
||||
// to the requested host are busy.
|
||||
//
|
||||
// It is recommended obtaining req and resp via AcquireRequest
|
||||
// and AcquireResponse in performance-critical code.
|
||||
func DoRedirects(req *Request, resp *Response, maxRedirectsCount int) error {
|
||||
_, _, err := doRequestFollowRedirects(req, resp, req.URI().String(), maxRedirectsCount, &defaultClient)
|
||||
return err
|
||||
}
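DoRedirects, documented above, exists only in the newer fasthttp that this commit removes from vendor (it is absent from v1.2.0); its intended usage in those newer releases is roughly as follows (sketch for context, not part of the downgraded code):

```go
package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/")

	// Follow at most 5 redirects; ErrTooManyRedirects is returned once exceeded.
	if err := fasthttp.DoRedirects(req, resp, 5); err != nil {
		log.Fatalf("request failed: %v", err)
	}
	fmt.Printf("status=%d body=%q\n", resp.StatusCode(), resp.Body())
}
```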
|
||||
|
||||
// Get returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
|
@ -177,7 +152,7 @@ var defaultClient Client
|
|||
//
|
||||
// It is safe calling Client methods from concurrently running goroutines.
|
||||
type Client struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Client name. Used in User-Agent request header.
|
||||
//
|
||||
|
@ -218,11 +193,6 @@ type Client struct {
|
|||
// after DefaultMaxIdleConnDuration.
|
||||
MaxIdleConnDuration time.Duration
|
||||
|
||||
// Keep-alive connections are closed after this duration.
|
||||
//
|
||||
// By default connection duration is unlimited.
|
||||
MaxConnDuration time.Duration
|
||||
|
||||
// Maximum number of attempts for idempotent calls
|
||||
//
|
||||
// DefaultMaxIdemponentCallAttempts is used if not set.
|
||||
|
@ -275,20 +245,6 @@ type Client struct {
|
|||
// * cONTENT-lenGTH -> Content-Length
|
||||
DisableHeaderNamesNormalizing bool
|
||||
|
||||
// Path values are sent as-is without normalization
|
||||
//
|
||||
// Disabled path normalization may be useful for proxying incoming requests
|
||||
// to servers that are expecting paths to be forwarded as-is.
|
||||
//
|
||||
// By default path values are normalized, i.e.
|
||||
// extra slashes are removed, special characters are encoded.
|
||||
DisablePathNormalizing bool
|
||||
|
||||
// Maximum duration for waiting for a free connection.
|
||||
//
|
||||
// By default will not waiting, return ErrNoFreeConns immediately
|
||||
MaxConnWaitTimeout time.Duration
|
||||
|
||||
mLock sync.Mutex
|
||||
m map[string]*HostClient
|
||||
ms map[string]*HostClient
|
||||
|
@ -401,30 +357,6 @@ func (c *Client) DoDeadline(req *Request, resp *Response, deadline time.Time) er
|
|||
return clientDoDeadline(req, resp, deadline, c)
|
||||
}
|
||||
|
||||
// DoRedirects performs the given http request and fills the given http response,
|
||||
// following up to maxRedirectsCount redirects. When the redirect count exceeds
|
||||
// maxRedirectsCount, ErrTooManyRedirects is returned.
|
||||
//
|
||||
// Request must contain at least non-zero RequestURI with full url (including
|
||||
// scheme and host) or non-zero Host header + RequestURI.
|
||||
//
|
||||
// Client determines the server to be requested in the following order:
|
||||
//
|
||||
// - from RequestURI if it contains full url with scheme and host;
|
||||
// - from Host header otherwise.
|
||||
//
|
||||
// Response is ignored if resp is nil.
|
||||
//
|
||||
// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
|
||||
// to the requested host are busy.
|
||||
//
|
||||
// It is recommended obtaining req and resp via AcquireRequest
|
||||
// and AcquireResponse in performance-critical code.
|
||||
func (c *Client) DoRedirects(req *Request, resp *Response, maxRedirectsCount int) error {
|
||||
_, _, err := doRequestFollowRedirects(req, resp, req.URI().String(), maxRedirectsCount, c)
|
||||
return err
|
||||
}
|
||||
|
||||
// Do performs the given http request and fills the given http response.
|
||||
//
|
||||
// Request must contain at least non-zero RequestURI with full url (including
|
||||
|
@ -483,7 +415,6 @@ func (c *Client) Do(req *Request, resp *Response) error {
|
|||
TLSConfig: c.TLSConfig,
|
||||
MaxConns: c.MaxConnsPerHost,
|
||||
MaxIdleConnDuration: c.MaxIdleConnDuration,
|
||||
MaxConnDuration: c.MaxConnDuration,
|
||||
MaxIdemponentCallAttempts: c.MaxIdemponentCallAttempts,
|
||||
ReadBufferSize: c.ReadBufferSize,
|
||||
WriteBufferSize: c.WriteBufferSize,
|
||||
|
@ -491,8 +422,6 @@ func (c *Client) Do(req *Request, resp *Response) error {
|
|||
WriteTimeout: c.WriteTimeout,
|
||||
MaxResponseBodySize: c.MaxResponseBodySize,
|
||||
DisableHeaderNamesNormalizing: c.DisableHeaderNamesNormalizing,
|
||||
DisablePathNormalizing: c.DisablePathNormalizing,
|
||||
MaxConnWaitTimeout: c.MaxConnWaitTimeout,
|
||||
}
|
||||
m[string(host)] = hc
|
||||
if len(m) == 1 {
|
||||
|
@ -571,7 +500,7 @@ type DialFunc func(addr string) (net.Conn, error)
|
|||
//
|
||||
// It is safe calling HostClient methods from concurrently running goroutines.
|
||||
type HostClient struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Comma-separated list of upstream HTTP server host addresses,
|
||||
// which are passed to Dial in a round-robin manner.
|
||||
|
@ -684,27 +613,12 @@ type HostClient struct {
|
|||
// * cONTENT-lenGTH -> Content-Length
|
||||
DisableHeaderNamesNormalizing bool
|
||||
|
||||
// Path values are sent as-is without normalization
|
||||
//
|
||||
// Disabled path normalization may be useful for proxying incoming requests
|
||||
// to servers that are expecting paths to be forwarded as-is.
|
||||
//
|
||||
// By default path values are normalized, i.e.
|
||||
// extra slashes are removed, special characters are encoded.
|
||||
DisablePathNormalizing bool
|
||||
|
||||
// Maximum duration for waiting for a free connection.
|
||||
//
|
||||
// By default will not waiting, return ErrNoFreeConns immediately
|
||||
MaxConnWaitTimeout time.Duration
|
||||
|
||||
clientName atomic.Value
|
||||
lastUseTime uint32
|
||||
|
||||
connsLock sync.Mutex
|
||||
connsCount int
|
||||
conns []*clientConn
|
||||
connsWait *wantConnQueue
|
||||
|
||||
addrsLock sync.Mutex
|
||||
addrs []string
|
||||
|
@ -791,7 +705,7 @@ type clientDoer interface {
|
|||
func clientGetURL(dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) {
|
||||
req := AcquireRequest()
|
||||
|
||||
statusCode, body, err = doRequestFollowRedirectsBuffer(req, dst, url, c)
|
||||
statusCode, body, err = doRequestFollowRedirects(req, dst, url, c)
|
||||
|
||||
ReleaseRequest(req)
|
||||
return statusCode, body, err
|
||||
|
@ -831,7 +745,7 @@ func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDo
|
|||
// concurrent requests, since timed out requests on client side
|
||||
// usually continue execution on the host.
|
||||
go func() {
|
||||
statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirectsBuffer(req, dst, url, c)
|
||||
statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirects(req, dst, url, c)
|
||||
ch <- clientURLResponse{
|
||||
statusCode: statusCodeCopy,
|
||||
body: bodyCopy,
|
||||
|
@ -863,50 +777,32 @@ func clientPostURL(dst []byte, url string, postArgs *Args, c clientDoer) (status
|
|||
req.Header.SetMethodBytes(strPost)
|
||||
req.Header.SetContentTypeBytes(strPostArgsContentType)
|
||||
if postArgs != nil {
|
||||
if _, err := postArgs.WriteTo(req.BodyWriter()); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
postArgs.WriteTo(req.BodyWriter())
|
||||
}
|
||||
|
||||
statusCode, body, err = doRequestFollowRedirectsBuffer(req, dst, url, c)
|
||||
statusCode, body, err = doRequestFollowRedirects(req, dst, url, c)
|
||||
|
||||
ReleaseRequest(req)
|
||||
return statusCode, body, err
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMissingLocation is returned by clients when the Location header is missing on
|
||||
// an HTTP response with a redirect status code.
|
||||
ErrMissingLocation = errors.New("missing Location header for http redirect")
|
||||
// ErrTooManyRedirects is returned by clients when the number of redirects followed
|
||||
// exceed the max count.
|
||||
ErrTooManyRedirects = errors.New("too many redirects detected when doing the request")
|
||||
errMissingLocation = errors.New("missing Location header for http redirect")
|
||||
errTooManyRedirects = errors.New("too many redirects detected when doing the request")
|
||||
)
|
||||
|
||||
const defaultMaxRedirectsCount = 16
|
||||
const maxRedirectsCount = 16
|
||||
|
||||
func doRequestFollowRedirectsBuffer(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) {
|
||||
func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) {
|
||||
resp := AcquireResponse()
|
||||
bodyBuf := resp.bodyBuffer()
|
||||
resp.keepBodyBuffer = true
|
||||
oldBody := bodyBuf.B
|
||||
bodyBuf.B = dst
|
||||
|
||||
statusCode, body, err = doRequestFollowRedirects(req, resp, url, defaultMaxRedirectsCount, c)
|
||||
|
||||
body = bodyBuf.B
|
||||
bodyBuf.B = oldBody
|
||||
resp.keepBodyBuffer = false
|
||||
ReleaseResponse(resp)
|
||||
|
||||
return statusCode, body, err
|
||||
}
|
||||
|
||||
func doRequestFollowRedirects(req *Request, resp *Response, url string, maxRedirectsCount int, c clientDoer) (statusCode int, body []byte, err error) {
|
||||
scheme := req.uri.Scheme()
|
||||
req.schemaUpdate = false
|
||||
redirectsCount := 0
|
||||
|
||||
redirectsCount := 0
|
||||
for {
|
||||
// In case redirect to different scheme
|
||||
if redirectsCount > 0 && !bytes.Equal(scheme, req.uri.Scheme()) {
|
||||
|
@ -929,23 +825,32 @@ func doRequestFollowRedirects(req *Request, resp *Response, url string, maxRedir
|
|||
break
|
||||
}
|
||||
statusCode = resp.Header.StatusCode()
|
||||
if !StatusCodeIsRedirect(statusCode) {
|
||||
if statusCode != StatusMovedPermanently &&
|
||||
statusCode != StatusFound &&
|
||||
statusCode != StatusSeeOther &&
|
||||
statusCode != StatusTemporaryRedirect &&
|
||||
statusCode != StatusPermanentRedirect {
|
||||
break
|
||||
}
|
||||
|
||||
redirectsCount++
|
||||
if redirectsCount > maxRedirectsCount {
|
||||
err = ErrTooManyRedirects
|
||||
err = errTooManyRedirects
|
||||
break
|
||||
}
|
||||
location := resp.Header.peek(strLocation)
|
||||
if len(location) == 0 {
|
||||
err = ErrMissingLocation
|
||||
err = errMissingLocation
|
||||
break
|
||||
}
|
||||
url = getRedirectURL(url, location)
|
||||
}
|
||||
|
||||
body = bodyBuf.B
|
||||
bodyBuf.B = oldBody
|
||||
resp.keepBodyBuffer = false
|
||||
ReleaseResponse(resp)
|
||||
|
||||
return statusCode, body, err
|
||||
}
|
||||
|
||||
|
@ -958,15 +863,6 @@ func getRedirectURL(baseURL string, location []byte) string {
|
|||
return redirectURL
|
||||
}
|
||||
|
||||
// StatusCodeIsRedirect returns true if the status code indicates a redirect.
|
||||
func StatusCodeIsRedirect(statusCode int) bool {
|
||||
return statusCode == StatusMovedPermanently ||
|
||||
statusCode == StatusFound ||
|
||||
statusCode == StatusSeeOther ||
|
||||
statusCode == StatusTemporaryRedirect ||
|
||||
statusCode == StatusPermanentRedirect
|
||||
}
|
||||
|
||||
var (
|
||||
requestPool sync.Pool
|
||||
responsePool sync.Pool
|
||||
|
@ -1065,30 +961,6 @@ func (c *HostClient) DoDeadline(req *Request, resp *Response, deadline time.Time
|
|||
return clientDoDeadline(req, resp, deadline, c)
|
||||
}
|
||||
|
||||
// DoRedirects performs the given http request and fills the given http response,
|
||||
// following up to maxRedirectsCount redirects. When the redirect count exceeds
|
||||
// maxRedirectsCount, ErrTooManyRedirects is returned.
|
||||
//
|
||||
// Request must contain at least non-zero RequestURI with full url (including
|
||||
// scheme and host) or non-zero Host header + RequestURI.
|
||||
//
|
||||
// Client determines the server to be requested in the following order:
|
||||
//
|
||||
// - from RequestURI if it contains full url with scheme and host;
|
||||
// - from Host header otherwise.
|
||||
//
|
||||
// Response is ignored if resp is nil.
|
||||
//
|
||||
// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections
|
||||
// to the requested host are busy.
|
||||
//
|
||||
// It is recommended obtaining req and resp via AcquireRequest
|
||||
// and AcquireResponse in performance-critical code.
|
||||
func (c *HostClient) DoRedirects(req *Request, resp *Response, maxRedirectsCount int) error {
|
||||
_, _, err := doRequestFollowRedirects(req, resp, req.URI().String(), maxRedirectsCount, c)
|
||||
return err
|
||||
}
|
||||
|
||||
func clientDoTimeout(req *Request, resp *Response, timeout time.Duration, c clientDoer) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
return clientDoDeadline(req, resp, deadline, c)
|
||||
|
@ -1113,11 +985,9 @@ func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c client
|
|||
req.copyToSkipBody(reqCopy)
|
||||
swapRequestBody(req, reqCopy)
|
||||
respCopy := AcquireResponse()
|
||||
if resp != nil {
|
||||
// Not calling resp.copyToSkipBody(respCopy) here to avoid
|
||||
// unexpected messing with headers
|
||||
respCopy.SkipBody = resp.SkipBody
|
||||
}
|
||||
// Not calling resp.copyToSkipBody(respCopy) here to avoid
|
||||
// unexpected messing with headers
|
||||
respCopy.SkipBody = resp.SkipBody
|
||||
|
||||
// Note that the request continues execution on ErrTimeout until
|
||||
// client-specific ReadTimeout exceeds. This helps limiting load
|
||||
|
@ -1127,49 +997,36 @@ func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c client
|
|||
// concurrent requests, since timed out requests on client side
|
||||
// usually continue execution on the host.
|
||||
|
||||
var mu sync.Mutex
|
||||
var timedout bool
|
||||
|
||||
var cleanup int32
|
||||
go func() {
|
||||
reqCopy.timeout = timeout
|
||||
errDo := c.Do(reqCopy, respCopy)
|
||||
mu.Lock()
|
||||
{
|
||||
if !timedout {
|
||||
if resp != nil {
|
||||
respCopy.copyToSkipBody(resp)
|
||||
swapResponseBody(resp, respCopy)
|
||||
}
|
||||
swapRequestBody(reqCopy, req)
|
||||
ch <- errDo
|
||||
}
|
||||
if atomic.LoadInt32(&cleanup) == 1 {
|
||||
ReleaseResponse(respCopy)
|
||||
ReleaseRequest(reqCopy)
|
||||
errorChPool.Put(chv)
|
||||
} else {
|
||||
ch <- errDo
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
ReleaseResponse(respCopy)
|
||||
ReleaseRequest(reqCopy)
|
||||
}()
|
||||
|
||||
tc := AcquireTimer(timeout)
|
||||
var err error
|
||||
select {
|
||||
case err = <-ch:
|
||||
case <-tc.C:
|
||||
mu.Lock()
|
||||
{
|
||||
timedout = true
|
||||
err = ErrTimeout
|
||||
if resp != nil {
|
||||
respCopy.copyToSkipBody(resp)
|
||||
swapResponseBody(resp, respCopy)
|
||||
}
|
||||
mu.Unlock()
|
||||
swapRequestBody(reqCopy, req)
|
||||
ReleaseResponse(respCopy)
|
||||
ReleaseRequest(reqCopy)
|
||||
errorChPool.Put(chv)
|
||||
case <-tc.C:
|
||||
atomic.StoreInt32(&cleanup, 1)
|
||||
err = ErrTimeout
|
||||
}
|
||||
ReleaseTimer(tc)
|
||||
|
||||
select {
|
||||
case <-ch:
|
||||
default:
|
||||
}
|
||||
errorChPool.Put(chv)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1197,7 +1054,6 @@ func (c *HostClient) Do(req *Request, resp *Response) error {
|
|||
maxAttempts = DefaultMaxIdemponentCallAttempts
|
||||
}
|
||||
attempts := 0
|
||||
hasBodyStream := req.IsBodyStream()
|
||||
|
||||
atomic.AddInt32(&c.pendingRequests, 1)
|
||||
for {
|
||||
|
@ -1206,9 +1062,6 @@ func (c *HostClient) Do(req *Request, resp *Response) error {
|
|||
break
|
||||
}
|
||||
|
||||
if hasBodyStream {
|
||||
break
|
||||
}
|
||||
if !isIdempotent(req) {
|
||||
// Retry non-idempotent requests if the server closes
|
||||
// the connection before sending the response.
|
||||
|
@ -1275,15 +1128,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
|||
|
||||
// Free up resources occupied by response before sending the request,
|
||||
// so the GC may reclaim these resources (e.g. response body).
|
||||
|
||||
// backing up SkipBody in case it was set explicitly
|
||||
customSkipBody := resp.SkipBody
|
||||
resp.Reset()
|
||||
resp.SkipBody = customSkipBody
|
||||
|
||||
if c.DisablePathNormalizing {
|
||||
req.URI().DisablePathNormalizing = true
|
||||
}
|
||||
|
||||
// If we detected a redirect to another schema
|
||||
if req.schemaUpdate {
|
||||
|
@ -1295,7 +1140,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
|||
req.SetConnectionClose()
|
||||
}
|
||||
|
||||
cc, err := c.acquireConn(req.timeout)
|
||||
cc, err := c.acquireConn()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -1350,7 +1195,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
|||
}
|
||||
}
|
||||
|
||||
if customSkipBody || !req.Header.IsGet() && req.Header.IsHead() {
|
||||
if !req.Header.IsGet() && req.Header.IsHead() {
|
||||
resp.SkipBody = true
|
||||
}
|
||||
if c.DisableHeaderNamesNormalizing {
|
||||
|
@ -1384,6 +1229,9 @@ var (
|
|||
// see this error.
|
||||
ErrNoFreeConns = errors.New("no free connections available to host")
|
||||
|
||||
// ErrTimeout is returned from timed out calls.
|
||||
ErrTimeout = errors.New("timeout")
|
||||
|
||||
// ErrConnectionClosed may be returned from client methods if the server
|
||||
// closes connection before returning the first response byte.
|
||||
//
|
||||
|
@ -1395,34 +1243,14 @@ var (
|
|||
"Make sure the server returns 'Connection: close' response header before closing the connection")
|
||||
)
|
||||
|
||||
type timeoutError struct {
|
||||
}
|
||||
|
||||
func (e *timeoutError) Error() string {
|
||||
return "timeout"
|
||||
}
|
||||
|
||||
// Only implement the Timeout() function of the net.Error interface.
|
||||
// This allows for checks like:
|
||||
//
|
||||
// if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() {
|
||||
func (e *timeoutError) Timeout() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrTimeout is returned from timed out calls.
|
||||
ErrTimeout = &timeoutError{}
|
||||
)
|
||||
|
||||
// SetMaxConns sets up the maximum number of connections which may be established to all hosts listed in Addr.
|
||||
func (c *HostClient) SetMaxConns(newMaxConns int) {
|
||||
c.connsLock.Lock()
|
||||
c.MaxConns = newMaxConns
|
||||
c.connsLock.Unlock()
|
||||
}
|
||||
|
||||
func (c *HostClient) acquireConn(reqTimeout time.Duration) (cc *clientConn, err error) {
|
||||
func (c *HostClient) acquireConn() (*clientConn, error) {
|
||||
var cc *clientConn
|
||||
createConn := false
|
||||
startCleaner := false
|
||||
|
||||
|
@ -1454,47 +1282,7 @@ func (c *HostClient) acquireConn(reqTimeout time.Duration) (cc *clientConn, err
|
|||
return cc, nil
|
||||
}
|
||||
if !createConn {
|
||||
if c.MaxConnWaitTimeout <= 0 {
|
||||
return nil, ErrNoFreeConns
|
||||
}
|
||||
|
||||
// reqTimeout c.MaxConnWaitTimeout wait duration
|
||||
// d1 d2 min(d1, d2)
|
||||
// 0(not set) d2 d2
|
||||
// d1 0(don't wait) 0(don't wait)
|
||||
// 0(not set) d2 d2
|
||||
timeout := c.MaxConnWaitTimeout
|
||||
timeoutOverridden := false
|
||||
// reqTimeout == 0 means not set
|
||||
if reqTimeout > 0 && reqTimeout < timeout {
|
||||
timeout = reqTimeout
|
||||
timeoutOverridden = true
|
||||
}
|
||||
|
||||
// wait for a free connection
|
||||
tc := AcquireTimer(timeout)
|
||||
defer ReleaseTimer(tc)
|
||||
|
||||
w := &wantConn{
|
||||
ready: make(chan struct{}, 1),
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
w.cancel(c, err)
|
||||
}
|
||||
}()
|
||||
|
||||
c.queueForIdle(w)
|
||||
|
||||
select {
|
||||
case <-w.ready:
|
||||
return w.conn, w.err
|
||||
case <-tc.C:
|
||||
if timeoutOverridden {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
return nil, ErrNoFreeConns
|
||||
}
|
||||
return nil, ErrNoFreeConns
|
||||
}
|
||||
|
||||
if startCleaner {
|
||||
|
@ -1511,33 +1299,6 @@ func (c *HostClient) acquireConn(reqTimeout time.Duration) (cc *clientConn, err
|
|||
return cc, nil
|
||||
}
|
||||
|
||||
func (c *HostClient) queueForIdle(w *wantConn) {
|
||||
c.connsLock.Lock()
|
||||
defer c.connsLock.Unlock()
|
||||
if c.connsWait == nil {
|
||||
c.connsWait = &wantConnQueue{}
|
||||
}
|
||||
c.connsWait.clearFront()
|
||||
c.connsWait.pushBack(w)
|
||||
}
|
||||
|
||||
func (c *HostClient) dialConnFor(w *wantConn) {
|
||||
conn, err := c.dialHostHard()
|
||||
|
||||
if err != nil {
|
||||
w.tryDeliver(nil, err)
|
||||
c.decConnsCount()
|
||||
return
|
||||
}
|
||||
|
||||
cc := acquireClientConn(conn)
|
||||
delivered := w.tryDeliver(cc, nil)
|
||||
if !delivered {
|
||||
// not delivered, return idle connection
|
||||
c.releaseConn(cc)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *HostClient) connsCleaner() {
|
||||
var (
|
||||
scratch []*clientConn
|
||||
|
@ -1601,30 +1362,9 @@ func (c *HostClient) closeConn(cc *clientConn) {
|
|||
}
|
||||
|
||||
func (c *HostClient) decConnsCount() {
|
||||
if c.MaxConnWaitTimeout <= 0 {
|
||||
c.connsLock.Lock()
|
||||
c.connsCount--
|
||||
c.connsLock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
c.connsLock.Lock()
|
||||
defer c.connsLock.Unlock()
|
||||
dialed := false
|
||||
if q := c.connsWait; q != nil && q.len() > 0 {
|
||||
for q.len() > 0 {
|
||||
w := q.popFront()
|
||||
if w.waiting() {
|
||||
go c.dialConnFor(w)
|
||||
dialed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !dialed {
|
||||
c.connsCount--
|
||||
}
|
||||
|
||||
c.connsCount--
|
||||
c.connsLock.Unlock()
|
||||
}
|
||||
|
||||
func acquireClientConn(conn net.Conn) *clientConn {
|
||||
|
@ -1648,29 +1388,9 @@ var clientConnPool sync.Pool
|
|||
|
||||
func (c *HostClient) releaseConn(cc *clientConn) {
|
||||
cc.lastUseTime = time.Now()
|
||||
if c.MaxConnWaitTimeout <= 0 {
|
||||
c.connsLock.Lock()
|
||||
c.conns = append(c.conns, cc)
|
||||
c.connsLock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// try to deliver an idle connection to a *wantConn
|
||||
c.connsLock.Lock()
|
||||
defer c.connsLock.Unlock()
|
||||
delivered := false
|
||||
if q := c.connsWait; q != nil && q.len() > 0 {
|
||||
for q.len() > 0 {
|
||||
w := q.popFront()
|
||||
if w.waiting() {
|
||||
delivered = w.tryDeliver(cc, nil)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !delivered {
|
||||
c.conns = append(c.conns, cc)
|
||||
}
|
||||
c.conns = append(c.conns, cc)
|
||||
c.connsLock.Unlock()
|
||||
}
|
||||
|
||||
func (c *HostClient) acquireWriter(conn net.Conn) *bufio.Writer {
|
||||
|
@ -1713,7 +1433,34 @@ func newClientTLSConfig(c *tls.Config, addr string) *tls.Config {
|
|||
if c == nil {
|
||||
c = &tls.Config{}
|
||||
} else {
|
||||
c = c.Clone()
|
||||
// TODO: substitute this with c.Clone() after go1.8 becomes mainstream :)
|
||||
c = &tls.Config{
|
||||
Rand: c.Rand,
|
||||
Time: c.Time,
|
||||
Certificates: c.Certificates,
|
||||
NameToCertificate: c.NameToCertificate,
|
||||
GetCertificate: c.GetCertificate,
|
||||
RootCAs: c.RootCAs,
|
||||
NextProtos: c.NextProtos,
|
||||
ServerName: c.ServerName,
|
||||
|
||||
// Do not copy ClientAuth, since it is server-related stuff
|
||||
// Do not copy ClientCAs, since it is server-related stuff
|
||||
|
||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||
CipherSuites: c.CipherSuites,
|
||||
|
||||
// Do not copy PreferServerCipherSuites - this is server stuff
|
||||
|
||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||
|
||||
// Do not copy SessionTicketKey - this is server stuff
|
||||
|
||||
ClientSessionCache: c.ClientSessionCache,
|
||||
MinVersion: c.MinVersion,
|
||||
MaxVersion: c.MaxVersion,
|
||||
CurvePreferences: c.CurvePreferences,
|
||||
}
|
||||
}
|
||||
|
||||
if c.ClientSessionCache == nil {
|
||||
|
@ -1776,7 +1523,7 @@ func (c *HostClient) dialHostHard() (conn net.Conn, err error) {
|
|||
for n > 0 {
|
||||
addr := c.nextAddr()
|
||||
tlsConfig := c.cachedTLSConfig(addr)
|
||||
conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig, c.WriteTimeout)
|
||||
conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig)
|
||||
if err == nil {
|
||||
return conn, nil
|
||||
}
|
||||
|
@ -1807,44 +1554,7 @@ func (c *HostClient) cachedTLSConfig(addr string) *tls.Config {
|
|||
return cfg
|
||||
}
|
||||
|
||||
// ErrTLSHandshakeTimeout indicates there is a timeout from tls handshake.
|
||||
var ErrTLSHandshakeTimeout = errors.New("tls handshake timed out")
|
||||
|
||||
var timeoutErrorChPool sync.Pool
|
||||
|
||||
func tlsClientHandshake(rawConn net.Conn, tlsConfig *tls.Config, timeout time.Duration) (net.Conn, error) {
|
||||
tc := AcquireTimer(timeout)
|
||||
defer ReleaseTimer(tc)
|
||||
|
||||
var ch chan error
|
||||
chv := timeoutErrorChPool.Get()
|
||||
if chv == nil {
|
||||
chv = make(chan error)
|
||||
}
|
||||
ch = chv.(chan error)
|
||||
defer timeoutErrorChPool.Put(chv)
|
||||
|
||||
conn := tls.Client(rawConn, tlsConfig)
|
||||
|
||||
go func() {
|
||||
ch <- conn.Handshake()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-tc.C:
|
||||
rawConn.Close()
|
||||
<-ch
|
||||
return nil, ErrTLSHandshakeTimeout
|
||||
case err := <-ch:
|
||||
if err != nil {
|
||||
rawConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
}
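Note: tlsClientHandshake above races the handshake goroutine against a pooled timer and, on timeout, closes the raw connection and drains the channel so the goroutine can exit. A stripped-down sketch of the same pattern for any blocking call is below; doWithTimeout is an illustrative name, and it uses a buffered channel instead of draining.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errOpTimeout = errors.New("operation timed out")

// doWithTimeout runs op in a goroutine and waits for it to finish or for
// timeout to expire, whichever happens first. The channel is buffered so the
// goroutine can always send its result and exit, even after a timeout.
func doWithTimeout(op func() error, timeout time.Duration) error {
	ch := make(chan error, 1)
	go func() { ch <- op() }()

	t := time.NewTimer(timeout)
	defer t.Stop()

	select {
	case err := <-ch:
		return err
	case <-t.C:
		return errOpTimeout
	}
}

func main() {
	slow := func() error { time.Sleep(time.Second); return nil }
	fmt.Println(doWithTimeout(slow, 100*time.Millisecond)) // operation timed out
}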
|
||||
|
||||
func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config, timeout time.Duration) (net.Conn, error) {
|
||||
func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config) (net.Conn, error) {
|
||||
if dial == nil {
|
||||
if dialDualStack {
|
||||
dial = DialDualStack
|
||||
|
@ -1861,10 +1571,7 @@ func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *
|
|||
panic("BUG: DialFunc returned (nil, nil)")
|
||||
}
|
||||
if isTLS {
|
||||
if timeout == 0 {
|
||||
return tls.Client(conn, tlsConfig), nil
|
||||
}
|
||||
return tlsClientHandshake(conn, tlsConfig, timeout)
|
||||
conn = tls.Client(conn, tlsConfig)
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
@ -1893,138 +1600,7 @@ func addMissingPort(addr string, isTLS bool) string {
|
|||
if isTLS {
|
||||
port = 443
|
||||
}
|
||||
return net.JoinHostPort(addr, strconv.Itoa(port))
|
||||
}
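Note: the newer addMissingPort kept by v1.12 builds the address with net.JoinHostPort, which brackets IPv6 literals, while the restored v1.2 code concatenates with fmt.Sprintf. A small sketch of the difference; addrWithDefaultPort is an illustrative helper, not fasthttp API, and it assumes the host has no port yet.

package main

import (
	"fmt"
	"net"
	"strconv"
)

// addrWithDefaultPort appends the default HTTP or HTTPS port to a bare host.
// net.JoinHostPort wraps IPv6 literals in brackets, e.g. "[::1]:443".
func addrWithDefaultPort(host string, isTLS bool) string {
	port := 80
	if isTLS {
		port = 443
	}
	return net.JoinHostPort(host, strconv.Itoa(port))
}

func main() {
	fmt.Println(addrWithDefaultPort("example.com", false)) // example.com:80
	fmt.Println(addrWithDefaultPort("::1", true))          // [::1]:443
	fmt.Printf("%s:%d\n", "::1", 443)                      // ::1:443 - ambiguous for IPv6
}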
|
||||
|
||||
// A wantConn records state about a wanted connection
|
||||
// (that is, an active call to getConn).
|
||||
// The conn may be gotten by dialing or by finding an idle connection,
|
||||
// or a cancellation may make the conn no longer wanted.
|
||||
// These three options are racing against each other and use
|
||||
// wantConn to coordinate and agree about the winning outcome.
|
||||
//
|
||||
// inspired by net/http/transport.go
|
||||
type wantConn struct {
|
||||
ready chan struct{}
|
||||
mu sync.Mutex // protects conn, err, close(ready)
|
||||
conn *clientConn
|
||||
err error
|
||||
}
|
||||
|
||||
// waiting reports whether w is still waiting for an answer (connection or error).
|
||||
func (w *wantConn) waiting() bool {
|
||||
select {
|
||||
case <-w.ready:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// tryDeliver attempts to deliver conn, err to w and reports whether it succeeded.
|
||||
func (w *wantConn) tryDeliver(conn *clientConn, err error) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.conn != nil || w.err != nil {
|
||||
return false
|
||||
}
|
||||
w.conn = conn
|
||||
w.err = err
|
||||
if w.conn == nil && w.err == nil {
|
||||
panic("fasthttp: internal error: misuse of tryDeliver")
|
||||
}
|
||||
close(w.ready)
|
||||
return true
|
||||
}
|
||||
|
||||
// cancel marks w as no longer wanting a result (for example, due to cancellation).
|
||||
// If a connection has been delivered already, cancel returns it with c.releaseConn.
|
||||
func (w *wantConn) cancel(c *HostClient, err error) {
|
||||
w.mu.Lock()
|
||||
if w.conn == nil && w.err == nil {
|
||||
close(w.ready) // catch misbehavior in future delivery
|
||||
}
|
||||
|
||||
conn := w.conn
|
||||
w.conn = nil
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
|
||||
if conn != nil {
|
||||
c.releaseConn(conn)
|
||||
}
|
||||
}
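Note: wantConn lets three outcomes race - a fresh dial, an idle connection handed back by releaseConn, and cancellation - with tryDeliver ensuring exactly one of them wins. A toy demonstration of that first-delivery-wins coordination; the want type and the two goroutines are illustrative only.

package main

import (
	"fmt"
	"sync"
	"time"
)

// want collects at most one result; later deliveries are rejected.
type want struct {
	mu    sync.Mutex
	ready chan struct{}
	value string
}

func (w *want) tryDeliver(v string) bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.value != "" {
		return false // somebody else already won the race
	}
	w.value = v
	close(w.ready)
	return true
}

func main() {
	w := &want{ready: make(chan struct{})}

	go func() { // simulated dialer
		time.Sleep(20 * time.Millisecond)
		fmt.Println("dialer delivered:", w.tryDeliver("dialed conn"))
	}()
	go func() { // simulated release of an idle connection
		time.Sleep(5 * time.Millisecond)
		fmt.Println("release delivered:", w.tryDeliver("idle conn"))
	}()

	<-w.ready
	w.mu.Lock()
	fmt.Println("winner:", w.value)
	w.mu.Unlock()
	time.Sleep(50 * time.Millisecond) // let the losing goroutine report
}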
|
||||
|
||||
// A wantConnQueue is a queue of wantConns.
|
||||
//
|
||||
// inspired by net/http/transport.go
|
||||
type wantConnQueue struct {
|
||||
// This is a queue, not a deque.
|
||||
// It is split into two stages - head[headPos:] and tail.
|
||||
// popFront is trivial (headPos++) on the first stage, and
|
||||
// pushBack is trivial (append) on the second stage.
|
||||
// If the first stage is empty, popFront can swap the
|
||||
// first and second stages to remedy the situation.
|
||||
//
|
||||
// This two-stage split is analogous to the use of two lists
|
||||
// in Okasaki's purely functional queue but without the
|
||||
// overhead of reversing the list when swapping stages.
|
||||
head []*wantConn
|
||||
headPos int
|
||||
tail []*wantConn
|
||||
}
|
||||
|
||||
// len returns the number of items in the queue.
|
||||
func (q *wantConnQueue) len() int {
|
||||
return len(q.head) - q.headPos + len(q.tail)
|
||||
}
|
||||
|
||||
// pushBack adds w to the back of the queue.
|
||||
func (q *wantConnQueue) pushBack(w *wantConn) {
|
||||
q.tail = append(q.tail, w)
|
||||
}
|
||||
|
||||
// popFront removes and returns the wantConn at the front of the queue.
|
||||
func (q *wantConnQueue) popFront() *wantConn {
|
||||
if q.headPos >= len(q.head) {
|
||||
if len(q.tail) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Pick up tail as new head, clear tail.
|
||||
q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
|
||||
}
|
||||
|
||||
w := q.head[q.headPos]
|
||||
q.head[q.headPos] = nil
|
||||
q.headPos++
|
||||
return w
|
||||
}
|
||||
|
||||
// peekFront returns the wantConn at the front of the queue without removing it.
|
||||
func (q *wantConnQueue) peekFront() *wantConn {
|
||||
|
||||
if q.headPos < len(q.head) {
|
||||
return q.head[q.headPos]
|
||||
}
|
||||
if len(q.tail) > 0 {
|
||||
return q.tail[0]
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// cleanFront pops any wantConns that are no longer waiting from the head of the
|
||||
// queue, reporting whether any were popped.
|
||||
func (q *wantConnQueue) clearFront() (cleaned bool) {
|
||||
for {
|
||||
w := q.peekFront()
|
||||
if w == nil || w.waiting() {
|
||||
return cleaned
|
||||
}
|
||||
q.popFront()
|
||||
cleaned = true
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", addr, port)
|
||||
}
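Note: the head/headPos/tail layout above gives amortized O(1) pushBack and popFront without a linked list: pops only advance headPos, and when the head stage is exhausted the tail slice is promoted to become the new head. A self-contained sketch of the same two-stage queue over strings; this is illustrative, not the fasthttp type.

package main

import "fmt"

// twoStageQueue is a FIFO queue split into head[headPos:] and tail.
type twoStageQueue struct {
	head    []string
	headPos int
	tail    []string
}

func (q *twoStageQueue) len() int { return len(q.head) - q.headPos + len(q.tail) }

func (q *twoStageQueue) pushBack(v string) { q.tail = append(q.tail, v) }

func (q *twoStageQueue) popFront() (string, bool) {
	if q.headPos >= len(q.head) {
		if len(q.tail) == 0 {
			return "", false
		}
		// Promote tail to head and reuse the old head slice for the new tail.
		q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
	}
	v := q.head[q.headPos]
	q.head[q.headPos] = "" // drop the reference so it can be collected
	q.headPos++
	return v, true
}

func main() {
	var q twoStageQueue
	for _, s := range []string{"a", "b", "c"} {
		q.pushBack(s)
	}
	for q.len() > 0 {
		v, _ := q.popFront()
		fmt.Println(v) // a, b, c
	}
}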
|
||||
|
||||
// PipelineClient pipelines requests over a limited set of concurrent
|
||||
|
@ -2040,7 +1616,7 @@ func (q *wantConnQueue) clearFront() (cleaned bool) {
|
|||
// It is safe calling PipelineClient methods from concurrently running
|
||||
// goroutines.
|
||||
type PipelineClient struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Address of the host to connect to.
|
||||
Addr string
|
||||
|
@ -2120,7 +1696,7 @@ type PipelineClient struct {
|
|||
}
|
||||
|
||||
type pipelineConnClient struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
Addr string
|
||||
MaxPendingRequests int
|
||||
|
@ -2402,7 +1978,7 @@ func (c *pipelineConnClient) init() {
|
|||
|
||||
func (c *pipelineConnClient) worker() error {
|
||||
tlsConfig := c.cachedTLSConfig()
|
||||
conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig, c.WriteTimeout)
|
||||
conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
6
vendor/github.com/valyala/fasthttp/compress.go

generated
vendored
|
@ -134,7 +134,7 @@ var (
|
|||
// * CompressHuffmanOnly
|
||||
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
|
||||
w := &byteSliceWriter{dst}
|
||||
WriteGzipLevel(w, src, level) //nolint:errcheck
|
||||
WriteGzipLevel(w, src, level)
|
||||
return w.b
|
||||
}
|
||||
|
||||
|
@ -230,7 +230,7 @@ func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
|
|||
// * CompressHuffmanOnly
|
||||
func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
|
||||
w := &byteSliceWriter{dst}
|
||||
WriteDeflateLevel(w, src, level) //nolint:errcheck
|
||||
WriteDeflateLevel(w, src, level)
|
||||
return w.b
|
||||
}
|
||||
|
||||
|
@ -415,7 +415,7 @@ func isFileCompressible(f *os.File, minCompressRatio float64) bool {
|
|||
}
|
||||
_, err := copyZeroAlloc(zw, lr)
|
||||
releaseStacklessGzipWriter(zw, CompressDefaultCompression)
|
||||
f.Seek(0, 0) //nolint:errcheck
|
||||
f.Seek(0, 0)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
|
18
vendor/github.com/valyala/fasthttp/cookie.go
generated
vendored
|
@ -31,9 +31,6 @@ const (
|
|||
CookieSameSiteLaxMode
|
||||
// CookieSameSiteStrictMode sets the SameSite flag with the "Strict" parameter
|
||||
CookieSameSiteStrictMode
|
||||
// CookieSameSiteNoneMode sets the SameSite flag with the "None" parameter
|
||||
// see https://tools.ietf.org/html/draft-west-cookie-incrementalism-00
|
||||
CookieSameSiteNoneMode
|
||||
)
|
||||
|
||||
// AcquireCookie returns an empty Cookie object from the pool.
|
||||
|
@ -65,7 +62,7 @@ var cookiePool = &sync.Pool{
|
|||
//
|
||||
// Cookie instance MUST NOT be used from concurrently running goroutines.
|
||||
type Cookie struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
key []byte
|
||||
value []byte
|
||||
|
@ -122,12 +119,8 @@ func (c *Cookie) SameSite() CookieSameSite {
|
|||
}
|
||||
|
||||
// SetSameSite sets the cookie's SameSite flag to the given value.
|
||||
// set value CookieSameSiteNoneMode will set Secure to true also to avoid browser rejection
|
||||
func (c *Cookie) SetSameSite(mode CookieSameSite) {
|
||||
c.sameSite = mode
|
||||
if mode == CookieSameSiteNoneMode {
|
||||
c.SetSecure(true)
|
||||
}
|
||||
}
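Note: SetSameSite above force-enables Secure for SameSite=None because browsers following the referenced draft reject "SameSite=None" cookies that are not also marked "Secure". A tiny sketch of the resulting attribute pair on the wire; this is plain string building, not the fasthttp cookie serializer.

package main

import "fmt"

// setCookieLine builds a Set-Cookie value; SameSite=None is only accepted by
// browsers together with Secure, so the flag is forced on in that case.
func setCookieLine(name, value, sameSite string) string {
	s := fmt.Sprintf("%s=%s; SameSite=%s", name, value, sameSite)
	if sameSite == "None" {
		s += "; Secure"
	}
	return s
}

func main() {
	fmt.Println(setCookieLine("sid", "abc123", "Lax"))  // sid=abc123; SameSite=Lax
	fmt.Println(setCookieLine("sid", "abc123", "None")) // sid=abc123; SameSite=None; Secure
}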
|
||||
|
||||
// Path returns cookie path.
|
||||
|
@ -295,11 +288,6 @@ func (c *Cookie) AppendBytes(dst []byte) []byte {
|
|||
dst = append(dst, strCookieSameSite...)
|
||||
dst = append(dst, '=')
|
||||
dst = append(dst, strCookieSameSiteStrict...)
|
||||
case CookieSameSiteNoneMode:
|
||||
dst = append(dst, ';', ' ')
|
||||
dst = append(dst, strCookieSameSite...)
|
||||
dst = append(dst, '=')
|
||||
dst = append(dst, strCookieSameSiteNone...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
@ -398,10 +386,6 @@ func (c *Cookie) ParseBytes(src []byte) error {
|
|||
if caseInsensitiveCompare(strCookieSameSiteStrict, kv.value) {
|
||||
c.sameSite = CookieSameSiteStrictMode
|
||||
}
|
||||
case 'n': // "none"
|
||||
if caseInsensitiveCompare(strCookieSameSiteNone, kv.value) {
|
||||
c.sameSite = CookieSameSiteNoneMode
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
15
vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go
generated
vendored
|
@ -1,14 +1,11 @@
|
|||
package fasthttputil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrInmemoryListenerClosed indicates that the InmemoryListener is already closed.
|
||||
var ErrInmemoryListenerClosed = errors.New("InmemoryListener is already closed: use of closed network connection")
|
||||
|
||||
// InmemoryListener provides in-memory dialer<->net.Listener implementation.
|
||||
//
|
||||
// It may be used either for fast in-process client<->server communications
|
||||
|
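Note: a minimal usage sketch of this listener as an in-process transport, assuming the fasthttputil package as vendored here and a plain echo loop rather than a fasthttp server; Dial blocks until the server side has accepted the connection.

package main

import (
	"bufio"
	"fmt"
	"io"

	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()

	// Server side: accept one connection and echo everything back.
	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		io.Copy(c, c) // echo until the client closes
	}()

	// Client side: Dial returns an in-memory net.Conn wired to the acceptor.
	c, err := ln.Dial()
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(c, "ping\n")
	line, _ := bufio.NewReader(c).ReadString('\n')
	fmt.Print(line) // ping
	c.Close()
	ln.Close()
}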
@ -39,7 +36,7 @@ func NewInmemoryListener() *InmemoryListener {
|
|||
func (ln *InmemoryListener) Accept() (net.Conn, error) {
|
||||
c, ok := <-ln.conns
|
||||
if !ok {
|
||||
return nil, ErrInmemoryListenerClosed
|
||||
return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection")
|
||||
}
|
||||
close(c.accepted)
|
||||
return c.conn, nil
|
||||
|
@ -54,7 +51,7 @@ func (ln *InmemoryListener) Close() error {
|
|||
close(ln.conns)
|
||||
ln.closed = true
|
||||
} else {
|
||||
err = ErrInmemoryListenerClosed
|
||||
err = fmt.Errorf("InmemoryListener is already closed")
|
||||
}
|
||||
ln.lock.Unlock()
|
||||
return err
|
||||
|
@ -84,14 +81,14 @@ func (ln *InmemoryListener) Dial() (net.Conn, error) {
|
|||
// Wait until the connection has been accepted.
|
||||
<-accepted
|
||||
} else {
|
||||
sConn.Close() //nolint:errcheck
|
||||
cConn.Close() //nolint:errcheck
|
||||
sConn.Close()
|
||||
cConn.Close()
|
||||
cConn = nil
|
||||
}
|
||||
ln.lock.Unlock()
|
||||
|
||||
if cConn == nil {
|
||||
return nil, ErrInmemoryListenerClosed
|
||||
return nil, fmt.Errorf("InmemoryListener is already closed")
|
||||
}
|
||||
return cConn, nil
|
||||
}
|
||||
|
|
40
vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go
generated
vendored
|
@ -9,8 +9,6 @@ import (
|
|||
)
|
||||
|
||||
// NewPipeConns returns new bi-directional connection pipe.
|
||||
//
|
||||
// PipeConns is NOT safe for concurrent use by multiple goroutines!
|
||||
func NewPipeConns() *PipeConns {
|
||||
ch1 := make(chan *byteBuffer, 4)
|
||||
ch2 := make(chan *byteBuffer, 4)
|
||||
|
@ -40,7 +38,6 @@ func NewPipeConns() *PipeConns {
|
|||
// calling Read in order to unblock each Write call.
|
||||
// * It supports read and write deadlines.
|
||||
//
|
||||
// PipeConns is NOT safe for concurrent use by multiple goroutines!
|
||||
type PipeConns struct {
|
||||
c1 pipeConn
|
||||
c2 pipeConn
|
||||
|
@ -90,8 +87,6 @@ type pipeConn struct {
|
|||
|
||||
readDeadlineCh <-chan time.Time
|
||||
writeDeadlineCh <-chan time.Time
|
||||
|
||||
readDeadlineChLock sync.Mutex
|
||||
}
|
||||
|
||||
func (c *pipeConn) Write(p []byte) (int, error) {
|
||||
|
@ -163,15 +158,10 @@ func (c *pipeConn) readNextByteBuffer(mayBlock bool) error {
|
|||
if !mayBlock {
|
||||
return errWouldBlock
|
||||
}
|
||||
c.readDeadlineChLock.Lock()
|
||||
readDeadlineCh := c.readDeadlineCh
|
||||
c.readDeadlineChLock.Unlock()
|
||||
select {
|
||||
case c.b = <-c.rCh:
|
||||
case <-readDeadlineCh:
|
||||
c.readDeadlineChLock.Lock()
|
||||
case <-c.readDeadlineCh:
|
||||
c.readDeadlineCh = closedDeadlineCh
|
||||
c.readDeadlineChLock.Unlock()
|
||||
// rCh may contain data when deadline is reached.
|
||||
// Read the data before returning ErrTimeout.
|
||||
select {
|
||||
|
@ -197,26 +187,9 @@ func (c *pipeConn) readNextByteBuffer(mayBlock bool) error {
|
|||
var (
|
||||
errWouldBlock = errors.New("would block")
|
||||
errConnectionClosed = errors.New("connection closed")
|
||||
)
|
||||
|
||||
type timeoutError struct {
|
||||
}
|
||||
|
||||
func (e *timeoutError) Error() string {
|
||||
return "timeout"
|
||||
}
|
||||
|
||||
// Only implement the Timeout() function of the net.Error interface.
|
||||
// This allows for checks like:
|
||||
//
|
||||
// if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() {
|
||||
func (e *timeoutError) Timeout() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrTimeout is returned from Read() or Write() on timeout.
|
||||
ErrTimeout = &timeoutError{}
|
||||
ErrTimeout = errors.New("timeout")
|
||||
)
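Note: the newer code returns a dedicated *timeoutError so callers can detect timeouts through the interface assertion quoted in the comment above, whereas the restored code goes back to a plain errors.New sentinel that only supports equality checks. A compact sketch of that detection pattern; the timeoutError type here is local to the example.

package main

import (
	"errors"
	"fmt"
)

// timeoutError reports itself as a timeout, mirroring net.Error's Timeout().
type timeoutError struct{}

func (e *timeoutError) Error() string { return "timeout" }
func (e *timeoutError) Timeout() bool { return true }

// isTimeout works for any error exposing Timeout() bool, regardless of its
// concrete type or package.
func isTimeout(err error) bool {
	x, ok := err.(interface{ Timeout() bool })
	return ok && x.Timeout()
}

func main() {
	fmt.Println(isTimeout(&timeoutError{}))       // true
	fmt.Println(isTimeout(errors.New("timeout"))) // false - a plain sentinel has no Timeout()
}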
|
||||
|
||||
func (c *pipeConn) Close() error {
|
||||
|
@ -232,8 +205,8 @@ func (c *pipeConn) RemoteAddr() net.Addr {
|
|||
}
|
||||
|
||||
func (c *pipeConn) SetDeadline(deadline time.Time) error {
|
||||
c.SetReadDeadline(deadline) //nolint:errcheck
|
||||
c.SetWriteDeadline(deadline) //nolint:errcheck
|
||||
c.SetReadDeadline(deadline)
|
||||
c.SetWriteDeadline(deadline)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -241,10 +214,7 @@ func (c *pipeConn) SetReadDeadline(deadline time.Time) error {
|
|||
if c.readDeadlineTimer == nil {
|
||||
c.readDeadlineTimer = time.NewTimer(time.Hour)
|
||||
}
|
||||
readDeadlineCh := updateTimer(c.readDeadlineTimer, deadline)
|
||||
c.readDeadlineChLock.Lock()
|
||||
c.readDeadlineCh = readDeadlineCh
|
||||
c.readDeadlineChLock.Unlock()
|
||||
c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
6
vendor/github.com/valyala/fasthttp/fs.go
generated
vendored
|
@ -194,7 +194,7 @@ func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
|
|||
//
|
||||
// It is prohibited copying FS values. Create new values instead.
|
||||
type FS struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Path to the root directory to serve files from.
|
||||
Root string
|
||||
|
@ -1193,9 +1193,7 @@ func readFileHeader(f *os.File, compressed bool) ([]byte, error) {
|
|||
N: 512,
|
||||
}
|
||||
data, err := ioutil.ReadAll(lr)
|
||||
if _, err := f.Seek(0, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Seek(0, 0)
|
||||
|
||||
if zr != nil {
|
||||
releaseGzipReader(zr)
|
||||
|
|
22
vendor/github.com/valyala/fasthttp/fuzzit.sh
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
## go-fuzz doesn't support modules for now, so ensure we do everything
|
||||
## in the old style GOPATH way
|
||||
export GO111MODULE="off"
|
||||
|
||||
# We need to download these dependencies again after we set GO111MODULE="off"
|
||||
go get -t -v ./...
|
||||
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
wget -q -O fuzzitbin https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64
|
||||
chmod a+x fuzzitbin
|
||||
|
||||
for w in request response cookie url; do
|
||||
go-fuzz-build -libfuzzer -o fasthttp_$w.a ./fuzzit/$w/
|
||||
clang -fsanitize=fuzzer fasthttp_$w.a -o fasthttp_$w
|
||||
|
||||
./fuzzitbin create job --type $1 fasthttp/$w fasthttp_$w
|
||||
done
|
7
vendor/github.com/valyala/fasthttp/go.mod
generated
vendored
|
@ -1,10 +1,9 @@
|
|||
module github.com/valyala/fasthttp
|
||||
|
||||
go 1.11
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.10.4
|
||||
github.com/klauspost/compress v1.4.0
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3
|
||||
)
|
||||
|
|
14
vendor/github.com/valyala/fasthttp/go.sum
generated
vendored
|
@ -1,12 +1,10 @@
|
|||
github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
|
||||
github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3 h1:czFLhve3vsQetD6JOJ8NZZvGQIXlnN3/yXxbT6/awxI=
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
309
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
|
@ -6,7 +6,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
@ -19,13 +18,12 @@ import (
|
|||
// ResponseHeader instance MUST NOT be used from concurrently running
|
||||
// goroutines.
|
||||
type ResponseHeader struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
disableNormalizing bool
|
||||
noHTTP11 bool
|
||||
connectionClose bool
|
||||
noDefaultContentType bool
|
||||
noDefaultDate bool
|
||||
|
||||
statusCode int
|
||||
contentLength int
|
||||
|
@ -48,7 +46,7 @@ type ResponseHeader struct {
|
|||
// RequestHeader instance MUST NOT be used from concurrently running
|
||||
// goroutines.
|
||||
type RequestHeader struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
disableNormalizing bool
|
||||
noHTTP11 bool
|
||||
|
@ -180,13 +178,13 @@ func (h *RequestHeader) ResetConnectionClose() {
|
|||
|
||||
// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set.
|
||||
func (h *ResponseHeader) ConnectionUpgrade() bool {
|
||||
return hasHeaderValue(h.Peek(HeaderConnection), strUpgrade)
|
||||
return hasHeaderValue(h.Peek("Connection"), strUpgrade)
|
||||
}
|
||||
|
||||
// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set.
|
||||
func (h *RequestHeader) ConnectionUpgrade() bool {
|
||||
h.parseRawHeaders()
|
||||
return hasHeaderValue(h.Peek(HeaderConnection), strUpgrade)
|
||||
return hasHeaderValue(h.Peek("Connection"), strUpgrade)
|
||||
}
|
||||
|
||||
// PeekCookie is able to returns cookie by a given key from response.
|
||||
|
@ -246,6 +244,9 @@ func (h *ResponseHeader) mustSkipContentLength() bool {
|
|||
// It may be negative:
|
||||
// -1 means Transfer-Encoding: chunked.
|
||||
func (h *RequestHeader) ContentLength() int {
|
||||
if h.ignoreBody() {
|
||||
return 0
|
||||
}
|
||||
return h.realContentLength()
|
||||
}
|
||||
|
||||
|
@ -642,7 +643,6 @@ func (h *ResponseHeader) DisableNormalizing() {
|
|||
func (h *ResponseHeader) Reset() {
|
||||
h.disableNormalizing = false
|
||||
h.noDefaultContentType = false
|
||||
h.noDefaultDate = false
|
||||
h.resetSkipNormalize()
|
||||
}
|
||||
|
||||
|
@ -696,7 +696,6 @@ func (h *ResponseHeader) CopyTo(dst *ResponseHeader) {
|
|||
dst.noHTTP11 = h.noHTTP11
|
||||
dst.connectionClose = h.connectionClose
|
||||
dst.noDefaultContentType = h.noDefaultContentType
|
||||
dst.noDefaultDate = h.noDefaultDate
|
||||
|
||||
dst.statusCode = h.statusCode
|
||||
dst.contentLength = h.contentLength
|
||||
|
@ -846,16 +845,16 @@ func (h *ResponseHeader) DelBytes(key []byte) {
|
|||
|
||||
func (h *ResponseHeader) del(key []byte) {
|
||||
switch string(key) {
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
h.contentType = h.contentType[:0]
|
||||
case HeaderServer:
|
||||
case "Server":
|
||||
h.server = h.server[:0]
|
||||
case HeaderSetCookie:
|
||||
case "Set-Cookie":
|
||||
h.cookies = h.cookies[:0]
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
h.contentLength = 0
|
||||
h.contentLengthBytes = h.contentLengthBytes[:0]
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
h.connectionClose = false
|
||||
}
|
||||
h.h = delAllArgsBytes(h.h, key)
|
||||
|
@ -878,18 +877,18 @@ func (h *RequestHeader) DelBytes(key []byte) {
|
|||
|
||||
func (h *RequestHeader) del(key []byte) {
|
||||
switch string(key) {
|
||||
case HeaderHost:
|
||||
case "Host":
|
||||
h.host = h.host[:0]
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
h.contentType = h.contentType[:0]
|
||||
case HeaderUserAgent:
|
||||
case "User-Agent":
|
||||
h.userAgent = h.userAgent[:0]
|
||||
case HeaderCookie:
|
||||
case "Cookie":
|
||||
h.cookies = h.cookies[:0]
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
h.contentLength = 0
|
||||
h.contentLengthBytes = h.contentLengthBytes[:0]
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
h.connectionClose = false
|
||||
}
|
||||
h.h = delAllArgsBytes(h.h, key)
|
||||
|
@ -965,30 +964,30 @@ func (h *ResponseHeader) SetBytesKV(key, value []byte) {
|
|||
// key is in canonical form.
|
||||
func (h *ResponseHeader) SetCanonical(key, value []byte) {
|
||||
switch string(key) {
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
h.SetContentTypeBytes(value)
|
||||
case HeaderServer:
|
||||
case "Server":
|
||||
h.SetServerBytes(value)
|
||||
case HeaderSetCookie:
|
||||
case "Set-Cookie":
|
||||
var kv *argsKV
|
||||
h.cookies, kv = allocArg(h.cookies)
|
||||
kv.key = getCookieKey(kv.key, value)
|
||||
kv.value = append(kv.value[:0], value...)
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
if contentLength, err := parseContentLength(value); err == nil {
|
||||
h.contentLength = contentLength
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
|
||||
}
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
if bytes.Equal(strClose, value) {
|
||||
h.SetConnectionClose()
|
||||
} else {
|
||||
h.ResetConnectionClose()
|
||||
h.h = setArgBytes(h.h, key, value, argsHasValue)
|
||||
}
|
||||
case HeaderTransferEncoding:
|
||||
case "Transfer-Encoding":
|
||||
// Transfer-Encoding is managed automatically.
|
||||
case HeaderDate:
|
||||
case "Date":
|
||||
// Date is managed automatically.
|
||||
default:
|
||||
h.h = setArgBytes(h.h, key, value, argsHasValue)
|
||||
|
@ -1150,28 +1149,28 @@ func (h *RequestHeader) SetBytesKV(key, value []byte) {
|
|||
func (h *RequestHeader) SetCanonical(key, value []byte) {
|
||||
h.parseRawHeaders()
|
||||
switch string(key) {
|
||||
case HeaderHost:
|
||||
case "Host":
|
||||
h.SetHostBytes(value)
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
h.SetContentTypeBytes(value)
|
||||
case HeaderUserAgent:
|
||||
case "User-Agent":
|
||||
h.SetUserAgentBytes(value)
|
||||
case HeaderCookie:
|
||||
case "Cookie":
|
||||
h.collectCookies()
|
||||
h.cookies = parseRequestCookies(h.cookies, value)
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
if contentLength, err := parseContentLength(value); err == nil {
|
||||
h.contentLength = contentLength
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
|
||||
}
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
if bytes.Equal(strClose, value) {
|
||||
h.SetConnectionClose()
|
||||
} else {
|
||||
h.ResetConnectionClose()
|
||||
h.h = setArgBytes(h.h, key, value, argsHasValue)
|
||||
}
|
||||
case HeaderTransferEncoding:
|
||||
case "Transfer-Encoding":
|
||||
// Transfer-Encoding is managed automatically.
|
||||
default:
|
||||
h.h = setArgBytes(h.h, key, value, argsHasValue)
|
||||
|
@ -1218,18 +1217,18 @@ func (h *RequestHeader) PeekBytes(key []byte) []byte {
|
|||
|
||||
func (h *ResponseHeader) peek(key []byte) []byte {
|
||||
switch string(key) {
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
return h.ContentType()
|
||||
case HeaderServer:
|
||||
case "Server":
|
||||
return h.Server()
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
if h.ConnectionClose() {
|
||||
return strClose
|
||||
}
|
||||
return peekArgBytes(h.h, key)
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
return h.contentLengthBytes
|
||||
case HeaderSetCookie:
|
||||
case "Set-Cookie":
|
||||
return appendResponseCookieBytes(nil, h.cookies)
|
||||
default:
|
||||
return peekArgBytes(h.h, key)
|
||||
|
@ -1239,24 +1238,25 @@ func (h *ResponseHeader) peek(key []byte) []byte {
|
|||
func (h *RequestHeader) peek(key []byte) []byte {
|
||||
h.parseRawHeaders()
|
||||
switch string(key) {
|
||||
case HeaderHost:
|
||||
case "Host":
|
||||
return h.Host()
|
||||
case HeaderContentType:
|
||||
case "Content-Type":
|
||||
return h.ContentType()
|
||||
case HeaderUserAgent:
|
||||
case "User-Agent":
|
||||
return h.UserAgent()
|
||||
case HeaderConnection:
|
||||
case "Connection":
|
||||
if h.ConnectionClose() {
|
||||
return strClose
|
||||
}
|
||||
return peekArgBytes(h.h, key)
|
||||
case HeaderContentLength:
|
||||
case "Content-Length":
|
||||
return h.contentLengthBytes
|
||||
case HeaderCookie:
|
||||
case "Cookie":
|
||||
if h.cookiesCollected {
|
||||
return appendRequestCookieBytes(nil, h.cookies)
|
||||
} else {
|
||||
return peekArgBytes(h.h, key)
|
||||
}
|
||||
return peekArgBytes(h.h, key)
|
||||
default:
|
||||
return peekArgBytes(h.h, key)
|
||||
}
|
||||
|
@ -1284,7 +1284,7 @@ func (h *ResponseHeader) Cookie(cookie *Cookie) bool {
|
|||
if v == nil {
|
||||
return false
|
||||
}
|
||||
cookie.ParseBytes(v) //nolint:errcheck
|
||||
cookie.ParseBytes(v)
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -1310,11 +1310,7 @@ func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error {
|
|||
h.resetSkipNormalize()
|
||||
b, err := r.Peek(n)
|
||||
if len(b) == 0 {
|
||||
// Return ErrTimeout on any timeout.
|
||||
if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() {
|
||||
return ErrTimeout
|
||||
}
|
||||
// treat all other errors on the first byte read as EOF
|
||||
// treat all errors on the first byte read as EOF
|
||||
if n == 1 || err == io.EOF {
|
||||
return io.EOF
|
||||
}
|
||||
|
@ -1400,10 +1396,9 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error {
|
|||
}
|
||||
}
|
||||
|
||||
// n == 1 on the first read for the request.
|
||||
if n == 1 {
|
||||
// We didn't read a single byte.
|
||||
return errNothingRead{err}
|
||||
return errNothingRead
|
||||
}
|
||||
|
||||
return fmt.Errorf("error when reading request headers: %s", err)
|
||||
|
@ -1441,7 +1436,7 @@ func isOnlyCRLF(b []byte) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func updateServerDate() {
|
||||
func init() {
|
||||
refreshServerDate()
|
||||
go func() {
|
||||
for {
|
||||
|
@ -1451,10 +1446,7 @@ func updateServerDate() {
|
|||
}()
|
||||
}
|
||||
|
||||
var (
|
||||
serverDate atomic.Value
|
||||
serverDateOnce sync.Once // serverDateOnce.Do(updateServerDate)
|
||||
)
|
||||
var serverDate atomic.Value
|
||||
|
||||
func refreshServerDate() {
|
||||
b := AppendHTTPDate(nil, time.Now())
|
||||
|
@ -1501,20 +1493,13 @@ func (h *ResponseHeader) AppendBytes(dst []byte) []byte {
|
|||
if len(server) != 0 {
|
||||
dst = appendHeaderLine(dst, strServer, server)
|
||||
}
|
||||
|
||||
if !h.noDefaultDate {
|
||||
serverDateOnce.Do(updateServerDate)
|
||||
dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte))
|
||||
}
|
||||
dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte))
|
||||
|
||||
// Append Content-Type only for non-zero responses
|
||||
// or if it is explicitly set.
|
||||
// See https://github.com/valyala/fasthttp/issues/28 .
|
||||
if h.ContentLength() != 0 || len(h.contentType) > 0 {
|
||||
contentType := h.ContentType()
|
||||
if len(contentType) > 0 {
|
||||
dst = appendHeaderLine(dst, strContentType, contentType)
|
||||
}
|
||||
dst = appendHeaderLine(dst, strContentType, h.ContentType())
|
||||
}
|
||||
|
||||
if len(h.contentLengthBytes) > 0 {
|
||||
|
@ -1523,7 +1508,7 @@ func (h *ResponseHeader) AppendBytes(dst []byte) []byte {
|
|||
|
||||
for i, n := 0, len(h.h); i < n; i++ {
|
||||
kv := &h.h[i]
|
||||
if h.noDefaultDate || !bytes.Equal(kv.key, strDate) {
|
||||
if !bytes.Equal(kv.key, strDate) {
|
||||
dst = appendHeaderLine(dst, kv.key, kv.value)
|
||||
}
|
||||
}
|
||||
|
@ -1610,14 +1595,17 @@ func (h *RequestHeader) AppendBytes(dst []byte) []byte {
|
|||
}
|
||||
|
||||
contentType := h.ContentType()
|
||||
if len(contentType) == 0 && !h.ignoreBody() {
|
||||
contentType = strPostArgsContentType
|
||||
}
|
||||
if len(contentType) > 0 {
|
||||
if !h.ignoreBody() {
|
||||
if len(contentType) == 0 {
|
||||
contentType = strPostArgsContentType
|
||||
}
|
||||
dst = appendHeaderLine(dst, strContentType, contentType)
|
||||
|
||||
if len(h.contentLengthBytes) > 0 {
|
||||
dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes)
|
||||
}
|
||||
} else if len(contentType) > 0 {
|
||||
dst = appendHeaderLine(dst, strContentType, contentType)
|
||||
}
|
||||
if len(h.contentLengthBytes) > 0 {
|
||||
dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes)
|
||||
}
|
||||
|
||||
for i, n := 0, len(h.h); i < n; i++ {
|
||||
|
@ -1671,19 +1659,23 @@ func (h *RequestHeader) parse(buf []byte) (int, error) {
|
|||
return 0, err
|
||||
}
|
||||
|
||||
var n int
|
||||
var rawHeaders []byte
|
||||
rawHeaders, _, err = readRawHeaders(h.rawHeaders[:0], buf[m:])
|
||||
rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
h.rawHeadersCopy = append(h.rawHeadersCopy[:0], rawHeaders...)
|
||||
var n int
|
||||
n, err = h.parseHeaders(buf[m:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
if !h.ignoreBody() || h.noHTTP11 {
|
||||
n, err = h.parseHeaders(buf[m:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
h.rawHeaders = append(h.rawHeaders[:0], buf[m:m+n]...)
|
||||
h.rawHeadersParsed = true
|
||||
} else {
|
||||
h.rawHeaders = rawHeaders
|
||||
}
|
||||
h.rawHeaders = append(h.rawHeaders[:0], buf[m:m+n]...)
|
||||
h.rawHeadersParsed = true
|
||||
return m + n, nil
|
||||
}
|
||||
|
||||
|
@ -1898,13 +1890,6 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
|
|||
var err error
|
||||
for s.next() {
|
||||
if len(s.key) > 0 {
|
||||
// Spaces between the header key and colon are not allowed.
|
||||
// See RFC 7230, Section 3.2.4.
|
||||
if bytes.IndexByte(s.key, ' ') != -1 || bytes.IndexByte(s.key, '\t') != -1 {
|
||||
err = fmt.Errorf("invalid header key %q", s.key)
|
||||
continue
|
||||
}
|
||||
|
||||
switch s.key[0] | 0x20 {
|
||||
case 'h':
|
||||
if caseInsensitiveCompare(s.key, strHost) {
|
||||
|
@ -1923,11 +1908,7 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
|
|||
}
|
||||
if caseInsensitiveCompare(s.key, strContentLength) {
|
||||
if h.contentLength != -1 {
|
||||
var nerr error
|
||||
if h.contentLength, nerr = parseContentLength(s.value); nerr != nil {
|
||||
if err == nil {
|
||||
err = nerr
|
||||
}
|
||||
if h.contentLength, err = parseContentLength(s.value); err != nil {
|
||||
h.contentLength = -2
|
||||
} else {
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
|
||||
|
@ -1956,12 +1937,9 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
|
|||
}
|
||||
h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue)
|
||||
}
|
||||
if s.err != nil && err == nil {
|
||||
err = s.err
|
||||
}
|
||||
if err != nil {
|
||||
if s.err != nil {
|
||||
h.connectionClose = true
|
||||
return 0, err
|
||||
return 0, s.err
|
||||
}
|
||||
|
||||
if h.contentLength < 0 {
|
||||
|
@ -1983,7 +1961,7 @@ func (h *RequestHeader) parseRawHeaders() {
|
|||
if len(h.rawHeaders) == 0 {
|
||||
return
|
||||
}
|
||||
h.parseHeaders(h.rawHeaders) //nolint:errcheck
|
||||
h.parseHeaders(h.rawHeaders)
|
||||
}
|
||||
|
||||
func (h *RequestHeader) collectCookies() {
|
||||
|
@ -2027,24 +2005,9 @@ type headerScanner struct {
|
|||
hLen int
|
||||
|
||||
disableNormalizing bool
|
||||
|
||||
// by checking whether the next line contains a colon or not to tell
|
||||
// it's a header entry or a multi line value of current header entry.
|
||||
// the side effect of this operation is that we know the index of the
|
||||
// next colon and new line, so this can be used during next iteration,
|
||||
// instead of find them again.
|
||||
nextColon int
|
||||
nextNewLine int
|
||||
|
||||
initialized bool
|
||||
}
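Note: the scanner walks the raw header block by locating the next ':' and '\n'; the nextColon/nextNewLine fields removed by this downgrade cache those positions when the look-ahead for multi-line values has already found them. A bare-bones single-pass scan without that caching, for illustration only.

package main

import (
	"bytes"
	"fmt"
)

// scanHeaders yields key/value pairs from a CRLF-separated header block.
// It stops at the empty line that terminates the headers.
func scanHeaders(b []byte, visit func(key, value []byte)) {
	for len(b) > 0 {
		if b[0] == '\r' || b[0] == '\n' { // blank line: end of headers
			return
		}
		colon := bytes.IndexByte(b, ':')
		nl := bytes.IndexByte(b, '\n')
		if colon < 0 || nl < 0 || colon > nl {
			return // malformed or incomplete header line
		}
		key := b[:colon]
		value := bytes.TrimSpace(b[colon+1 : nl])
		visit(key, value)
		b = b[nl+1:]
	}
}

func main() {
	raw := []byte("Host: example.com\r\nContent-Length: 5\r\n\r\nhello")
	scanHeaders(raw, func(k, v []byte) {
		fmt.Printf("%s = %s\n", k, v)
	})
}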
|
||||
|
||||
func (s *headerScanner) next() bool {
|
||||
if !s.initialized {
|
||||
s.nextColon = -1
|
||||
s.nextNewLine = -1
|
||||
s.initialized = true
|
||||
}
|
||||
bLen := len(s.b)
|
||||
if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' {
|
||||
s.b = s.b[2:]
|
||||
|
@ -2056,27 +2019,7 @@ func (s *headerScanner) next() bool {
|
|||
s.hLen++
|
||||
return false
|
||||
}
|
||||
var n int
|
||||
if s.nextColon >= 0 {
|
||||
n = s.nextColon
|
||||
s.nextColon = -1
|
||||
} else {
|
||||
n = bytes.IndexByte(s.b, ':')
|
||||
|
||||
// There can't be a \n inside the header name, check for this.
|
||||
x := bytes.IndexByte(s.b, '\n')
|
||||
if x < 0 {
|
||||
// A header name should always at some point be followed by a \n
|
||||
// even if it's the one that terminates the header block.
|
||||
s.err = errNeedMore
|
||||
return false
|
||||
}
|
||||
if x < n {
|
||||
// There was a \n before the :
|
||||
s.err = errInvalidName
|
||||
return false
|
||||
}
|
||||
}
|
||||
n := bytes.IndexByte(s.b, ':')
|
||||
if n < 0 {
|
||||
s.err = errNeedMore
|
||||
return false
|
||||
|
@ -2086,51 +2029,14 @@ func (s *headerScanner) next() bool {
|
|||
n++
|
||||
for len(s.b) > n && s.b[n] == ' ' {
|
||||
n++
|
||||
// the newline index is a relative index, and lines below trimed `s.b` by `n`,
|
||||
// so the relative newline index also shifted forward. it's safe to decrease
|
||||
// to a minus value, it means it's invalid, and will find the newline again.
|
||||
s.nextNewLine--
|
||||
}
|
||||
s.hLen += n
|
||||
s.b = s.b[n:]
|
||||
if s.nextNewLine >= 0 {
|
||||
n = s.nextNewLine
|
||||
s.nextNewLine = -1
|
||||
} else {
|
||||
n = bytes.IndexByte(s.b, '\n')
|
||||
}
|
||||
n = bytes.IndexByte(s.b, '\n')
|
||||
if n < 0 {
|
||||
s.err = errNeedMore
|
||||
return false
|
||||
}
|
||||
isMultiLineValue := false
|
||||
for {
|
||||
if n+1 >= len(s.b) {
|
||||
break
|
||||
}
|
||||
if s.b[n+1] != ' ' && s.b[n+1] != '\t' {
|
||||
break
|
||||
}
|
||||
d := bytes.IndexByte(s.b[n+1:], '\n')
|
||||
if d <= 0 {
|
||||
break
|
||||
} else if d == 1 && s.b[n+1] == '\r' {
|
||||
break
|
||||
}
|
||||
e := n + d + 1
|
||||
if c := bytes.IndexByte(s.b[n+1:e], ':'); c >= 0 {
|
||||
s.nextColon = c
|
||||
s.nextNewLine = d - c - 1
|
||||
break
|
||||
}
|
||||
isMultiLineValue = true
|
||||
n = e
|
||||
}
|
||||
if n >= len(s.b) {
|
||||
s.err = errNeedMore
|
||||
return false
|
||||
}
|
||||
oldB := s.b
|
||||
s.value = s.b[:n]
|
||||
s.hLen += n + 1
|
||||
s.b = s.b[n+1:]
|
||||
|
@ -2142,9 +2048,6 @@ func (s *headerScanner) next() bool {
|
|||
n--
|
||||
}
|
||||
s.value = s.value[:n]
|
||||
if isMultiLineValue {
|
||||
s.value, s.b, s.hLen = normalizeHeaderValue(s.value, oldB, s.hLen)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -2213,52 +2116,6 @@ func getHeaderKeyBytes(kv *argsKV, key string, disableNormalizing bool) []byte {
|
|||
return kv.key
|
||||
}
|
||||
|
||||
func normalizeHeaderValue(ov, ob []byte, headerLength int) (nv, nb []byte, nhl int) {
|
||||
nv = ov
|
||||
length := len(ov)
|
||||
if length <= 0 {
|
||||
return
|
||||
}
|
||||
write := 0
|
||||
shrunk := 0
|
||||
lineStart := false
|
||||
for read := 0; read < length; read++ {
|
||||
c := ov[read]
|
||||
if c == '\r' || c == '\n' {
|
||||
shrunk++
|
||||
if c == '\n' {
|
||||
lineStart = true
|
||||
}
|
||||
continue
|
||||
} else if lineStart && c == '\t' {
|
||||
c = ' '
|
||||
} else {
|
||||
lineStart = false
|
||||
}
|
||||
nv[write] = c
|
||||
write++
|
||||
}
|
||||
|
||||
nv = nv[:write]
|
||||
copy(ob[write:], ob[write+shrunk:])
|
||||
|
||||
// Check if we need to skip \r\n or just \n
|
||||
skip := 0
|
||||
if ob[write] == '\r' {
|
||||
if ob[write+1] == '\n' {
|
||||
skip += 2
|
||||
} else {
|
||||
skip++
|
||||
}
|
||||
} else if ob[write] == '\n' {
|
||||
skip++
|
||||
}
|
||||
|
||||
nb = ob[write+skip : len(ob)-shrunk]
|
||||
nhl = headerLength - shrunk
|
||||
return
|
||||
}
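Note: normalizeHeaderValue, removed by this downgrade, collapses obsolete multi-line ("folded") header values in place by dropping CR/LF bytes and turning a tab at the start of a continuation line into a space. An allocation-based sketch of the same normalization; unfoldHeaderValue is an illustrative name and it resets after the first tab, unlike the in-place version.

package main

import "fmt"

// unfoldHeaderValue rewrites a folded header value into a single line:
// CR and LF bytes are dropped, and a tab that starts a continuation line
// becomes a single space.
func unfoldHeaderValue(v []byte) []byte {
	out := make([]byte, 0, len(v))
	lineStart := false
	for _, c := range v {
		switch {
		case c == '\r' || c == '\n':
			if c == '\n' {
				lineStart = true
			}
			continue
		case lineStart && c == '\t':
			c = ' '
			lineStart = false
		default:
			lineStart = false
		}
		out = append(out, c)
	}
	return out
}

func main() {
	folded := []byte("text/html;\r\n\tcharset=utf-8")
	fmt.Printf("%s\n", unfoldHeaderValue(folded)) // text/html; charset=utf-8
}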
|
||||
|
||||
func normalizeHeaderKey(b []byte, disableNormalizing bool) {
|
||||
if disableNormalizing {
|
||||
return
|
||||
|
@ -2315,14 +2172,10 @@ func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte {
|
|||
|
||||
var (
|
||||
errNeedMore = errors.New("need more data: cannot find trailing lf")
|
||||
errInvalidName = errors.New("invalid header name")
|
||||
errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize")
|
||||
errNothingRead = errors.New("read timeout with nothing read")
|
||||
)
|
||||
|
||||
type errNothingRead struct {
|
||||
error
|
||||
}
|
||||
|
||||
// ErrSmallBuffer is returned when the provided buffer size is too small
|
||||
// for reading request and/or response headers.
|
||||
//
|
||||
|
|
164
vendor/github.com/valyala/fasthttp/headers.go
generated
vendored
|
@ -1,164 +0,0 @@
|
|||
package fasthttp
|
||||
|
||||
// Headers
|
||||
const (
|
||||
// Authentication
|
||||
HeaderAuthorization = "Authorization"
|
||||
HeaderProxyAuthenticate = "Proxy-Authenticate"
|
||||
HeaderProxyAuthorization = "Proxy-Authorization"
|
||||
HeaderWWWAuthenticate = "WWW-Authenticate"
|
||||
|
||||
// Caching
|
||||
HeaderAge = "Age"
|
||||
HeaderCacheControl = "Cache-Control"
|
||||
HeaderClearSiteData = "Clear-Site-Data"
|
||||
HeaderExpires = "Expires"
|
||||
HeaderPragma = "Pragma"
|
||||
HeaderWarning = "Warning"
|
||||
|
||||
// Client hints
|
||||
HeaderAcceptCH = "Accept-CH"
|
||||
HeaderAcceptCHLifetime = "Accept-CH-Lifetime"
|
||||
HeaderContentDPR = "Content-DPR"
|
||||
HeaderDPR = "DPR"
|
||||
HeaderEarlyData = "Early-Data"
|
||||
HeaderSaveData = "Save-Data"
|
||||
HeaderViewportWidth = "Viewport-Width"
|
||||
HeaderWidth = "Width"
|
||||
|
||||
// Conditionals
|
||||
HeaderETag = "ETag"
|
||||
HeaderIfMatch = "If-Match"
|
||||
HeaderIfModifiedSince = "If-Modified-Since"
|
||||
HeaderIfNoneMatch = "If-None-Match"
|
||||
HeaderIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
HeaderLastModified = "Last-Modified"
|
||||
HeaderVary = "Vary"
|
||||
|
||||
// Connection management
|
||||
HeaderConnection = "Connection"
|
||||
HeaderKeepAlive = "Keep-Alive"
|
||||
|
||||
// Content negotiation
|
||||
HeaderAccept = "Accept"
|
||||
HeaderAcceptCharset = "Accept-Charset"
|
||||
HeaderAcceptEncoding = "Accept-Encoding"
|
||||
HeaderAcceptLanguage = "Accept-Language"
|
||||
|
||||
// Controls
|
||||
HeaderCookie = "Cookie"
|
||||
HeaderExpect = "Expect"
|
||||
HeaderMaxForwards = "Max-Forwards"
|
||||
HeaderSetCookie = "Set-Cookie"
|
||||
|
||||
// CORS
|
||||
HeaderAccessControlAllowCredentials = "Access-Control-Allow-Credentials"
|
||||
HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers"
|
||||
HeaderAccessControlAllowMethods = "Access-Control-Allow-Methods"
|
||||
HeaderAccessControlAllowOrigin = "Access-Control-Allow-Origin"
|
||||
HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers"
|
||||
HeaderAccessControlMaxAge = "Access-Control-Max-Age"
|
||||
HeaderAccessControlRequestHeaders = "Access-Control-Request-Headers"
|
||||
HeaderAccessControlRequestMethod = "Access-Control-Request-Method"
|
||||
HeaderOrigin = "Origin"
|
||||
HeaderTimingAllowOrigin = "Timing-Allow-Origin"
|
||||
HeaderXPermittedCrossDomainPolicies = "X-Permitted-Cross-Domain-Policies"
|
||||
|
||||
// Do Not Track
|
||||
HeaderDNT = "DNT"
|
||||
HeaderTk = "Tk"
|
||||
|
||||
// Downloads
|
||||
HeaderContentDisposition = "Content-Disposition"
|
||||
|
||||
// Message body information
|
||||
HeaderContentEncoding = "Content-Encoding"
|
||||
HeaderContentLanguage = "Content-Language"
|
||||
HeaderContentLength = "Content-Length"
|
||||
HeaderContentLocation = "Content-Location"
|
||||
HeaderContentType = "Content-Type"
|
||||
|
||||
// Proxies
|
||||
HeaderForwarded = "Forwarded"
|
||||
HeaderVia = "Via"
|
||||
HeaderXForwardedFor = "X-Forwarded-For"
|
||||
HeaderXForwardedHost = "X-Forwarded-Host"
|
||||
HeaderXForwardedProto = "X-Forwarded-Proto"
|
||||
|
||||
// Redirects
|
||||
HeaderLocation = "Location"
|
||||
|
||||
// Request context
|
||||
HeaderFrom = "From"
|
||||
HeaderHost = "Host"
|
||||
HeaderReferer = "Referer"
|
||||
HeaderReferrerPolicy = "Referrer-Policy"
|
||||
HeaderUserAgent = "User-Agent"
|
||||
|
||||
// Response context
|
||||
HeaderAllow = "Allow"
|
||||
HeaderServer = "Server"
|
||||
|
||||
// Range requests
|
||||
HeaderAcceptRanges = "Accept-Ranges"
|
||||
HeaderContentRange = "Content-Range"
|
||||
HeaderIfRange = "If-Range"
|
||||
HeaderRange = "Range"
|
||||
|
||||
// Security
|
||||
HeaderContentSecurityPolicy = "Content-Security-Policy"
|
||||
HeaderContentSecurityPolicyReportOnly = "Content-Security-Policy-Report-Only"
|
||||
HeaderCrossOriginResourcePolicy = "Cross-Origin-Resource-Policy"
|
||||
HeaderExpectCT = "Expect-CT"
|
||||
HeaderFeaturePolicy = "Feature-Policy"
|
||||
HeaderPublicKeyPins = "Public-Key-Pins"
|
||||
HeaderPublicKeyPinsReportOnly = "Public-Key-Pins-Report-Only"
|
||||
HeaderStrictTransportSecurity = "Strict-Transport-Security"
|
||||
HeaderUpgradeInsecureRequests = "Upgrade-Insecure-Requests"
|
||||
HeaderXContentTypeOptions = "X-Content-Type-Options"
|
||||
HeaderXDownloadOptions = "X-Download-Options"
|
||||
HeaderXFrameOptions = "X-Frame-Options"
|
||||
HeaderXPoweredBy = "X-Powered-By"
|
||||
HeaderXXSSProtection = "X-XSS-Protection"
|
||||
|
||||
// Server-sent event
|
||||
HeaderLastEventID = "Last-Event-ID"
|
||||
HeaderNEL = "NEL"
|
||||
HeaderPingFrom = "Ping-From"
|
||||
HeaderPingTo = "Ping-To"
|
||||
HeaderReportTo = "Report-To"
|
||||
|
||||
// Transfer coding
|
||||
HeaderTE = "TE"
|
||||
HeaderTrailer = "Trailer"
|
||||
HeaderTransferEncoding = "Transfer-Encoding"
|
||||
|
||||
// WebSockets
|
||||
HeaderSecWebSocketAccept = "Sec-WebSocket-Accept"
|
||||
HeaderSecWebSocketExtensions = "Sec-WebSocket-Extensions"
|
||||
HeaderSecWebSocketKey = "Sec-WebSocket-Key"
|
||||
HeaderSecWebSocketProtocol = "Sec-WebSocket-Protocol"
|
||||
HeaderSecWebSocketVersion = "Sec-WebSocket-Version"
|
||||
|
||||
// Other
|
||||
HeaderAcceptPatch = "Accept-Patch"
|
||||
HeaderAcceptPushPolicy = "Accept-Push-Policy"
|
||||
HeaderAcceptSignature = "Accept-Signature"
|
||||
HeaderAltSvc = "Alt-Svc"
|
||||
HeaderDate = "Date"
|
||||
HeaderIndex = "Index"
|
||||
HeaderLargeAllocation = "Large-Allocation"
|
||||
HeaderLink = "Link"
|
||||
HeaderPushPolicy = "Push-Policy"
|
||||
HeaderRetryAfter = "Retry-After"
|
||||
HeaderServerTiming = "Server-Timing"
|
||||
HeaderSignature = "Signature"
|
||||
HeaderSignedHeaders = "Signed-Headers"
|
||||
HeaderSourceMap = "SourceMap"
|
||||
HeaderUpgrade = "Upgrade"
|
||||
HeaderXDNSPrefetchControl = "X-DNS-Prefetch-Control"
|
||||
HeaderXPingback = "X-Pingback"
|
||||
HeaderXRequestedWith = "X-Requested-With"
|
||||
HeaderXRobotsTag = "X-Robots-Tag"
|
||||
HeaderXUACompatible = "X-UA-Compatible"
|
||||
)
|
194
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
|
@ -3,7 +3,6 @@ package fasthttp
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -11,7 +10,6 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
@ -23,7 +21,7 @@ import (
|
|||
//
|
||||
// Request instance MUST NOT be used from concurrently running goroutines.
|
||||
type Request struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Request header
|
||||
//
|
||||
|
@ -50,10 +48,6 @@ type Request struct {
|
|||
|
||||
// To detect scheme changes in redirects
|
||||
schemaUpdate bool
|
||||
|
||||
// Request timeout. Usually set by DoDealine or DoTimeout
|
||||
// if <= 0, means not set
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Response represents HTTP response.
|
||||
|
@ -63,21 +57,16 @@ type Request struct {
|
|||
//
|
||||
// Response instance MUST NOT be used from concurrently running goroutines.
|
||||
type Response struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Response header
|
||||
//
|
||||
// Copying Header by value is forbidden. Use pointer to Header instead.
|
||||
Header ResponseHeader
|
||||
|
||||
// Flush headers as soon as possible without waiting for first body bytes.
|
||||
// Relevant for bodyStream only.
|
||||
ImmediateHeaderFlush bool
|
||||
|
||||
bodyStream io.Reader
|
||||
w responseBodyWriter
|
||||
body *bytebufferpool.ByteBuffer
|
||||
bodyRaw []byte
|
||||
|
||||
// Response.Read() skips reading body if set to true.
|
||||
// Use it for reading HEAD responses.
|
||||
|
@ -323,7 +312,7 @@ func (resp *Response) Body() []byte {
|
|||
bodyBuf := resp.bodyBuffer()
|
||||
bodyBuf.Reset()
|
||||
_, err := copyZeroAlloc(bodyBuf, resp.bodyStream)
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
if err != nil {
|
||||
bodyBuf.SetString(err.Error())
|
||||
}
|
||||
|
@ -332,9 +321,6 @@ func (resp *Response) Body() []byte {
|
|||
}
|
||||
|
||||
func (resp *Response) bodyBytes() []byte {
|
||||
if resp.bodyRaw != nil {
|
||||
return resp.bodyRaw
|
||||
}
|
||||
if resp.body == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -352,7 +338,6 @@ func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer {
|
|||
if resp.body == nil {
|
||||
resp.body = responseBodyPool.Get()
|
||||
}
|
||||
resp.bodyRaw = nil
|
||||
return resp.body
|
||||
}
|
||||
|
||||
|
@ -426,7 +411,7 @@ func inflateData(p []byte) ([]byte, error) {
|
|||
func (req *Request) BodyWriteTo(w io.Writer) error {
|
||||
if req.bodyStream != nil {
|
||||
_, err := copyZeroAlloc(w, req.bodyStream)
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
return err
|
||||
}
|
||||
if req.onlyMultipartForm() {
|
||||
|
@ -440,7 +425,7 @@ func (req *Request) BodyWriteTo(w io.Writer) error {
|
|||
func (resp *Response) BodyWriteTo(w io.Writer) error {
|
||||
if resp.bodyStream != nil {
|
||||
_, err := copyZeroAlloc(w, resp.bodyStream)
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
return err
|
||||
}
|
||||
_, err := w.Write(resp.bodyBytes())
|
||||
|
@ -456,8 +441,8 @@ func (resp *Response) AppendBody(p []byte) {
|
|||
|
||||
// AppendBodyString appends s to response body.
|
||||
func (resp *Response) AppendBodyString(s string) {
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.bodyBuffer().WriteString(s) //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
resp.bodyBuffer().WriteString(s)
|
||||
}
|
||||
|
||||
// SetBody sets response body.
|
||||
|
@ -469,16 +454,15 @@ func (resp *Response) SetBody(body []byte) {
|
|||
|
||||
// SetBodyString sets response body.
|
||||
func (resp *Response) SetBodyString(body string) {
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
bodyBuf := resp.bodyBuffer()
|
||||
bodyBuf.Reset()
|
||||
bodyBuf.WriteString(body) //nolint:errcheck
|
||||
bodyBuf.WriteString(body)
|
||||
}
|
||||
|
||||
// ResetBody resets response body.
|
||||
func (resp *Response) ResetBody() {
|
||||
resp.bodyRaw = nil
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
if resp.body != nil {
|
||||
if resp.keepBodyBuffer {
|
||||
resp.body.Reset()
|
||||
|
@ -489,14 +473,6 @@ func (resp *Response) ResetBody() {
|
|||
}
|
||||
}
|
||||
|
||||
// SetBodyRaw sets response body, but without copying it.
|
||||
//
|
||||
// From this point onward the body argument must not be changed.
|
||||
func (resp *Response) SetBodyRaw(body []byte) {
|
||||
resp.ResetBody()
|
||||
resp.bodyRaw = body
|
||||
}
|
||||
|
||||
// ReleaseBody retires the response body if it is greater than "size" bytes.
|
||||
//
|
||||
// This permits GC to reclaim the large buffer. If used, must be before
|
||||
|
@ -505,9 +481,8 @@ func (resp *Response) SetBodyRaw(body []byte) {
|
|||
// Use this method only if you really understand how it works.
|
||||
// The majority of workloads don't need this method.
|
||||
func (resp *Response) ReleaseBody(size int) {
|
||||
resp.bodyRaw = nil
|
||||
if cap(resp.body.B) > size {
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
resp.body = nil
|
||||
}
|
||||
}
|
||||
|
@ -521,7 +496,7 @@ func (resp *Response) ReleaseBody(size int) {
|
|||
// The majority of workloads don't need this method.
|
||||
func (req *Request) ReleaseBody(size int) {
|
||||
if cap(req.body.B) > size {
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
req.body = nil
|
||||
}
|
||||
}
|
||||
|
@ -537,15 +512,13 @@ func (resp *Response) SwapBody(body []byte) []byte {
|
|||
if resp.bodyStream != nil {
|
||||
bb.Reset()
|
||||
_, err := copyZeroAlloc(bb, resp.bodyStream)
|
||||
resp.closeBodyStream() //nolint:errcheck
|
||||
resp.closeBodyStream()
|
||||
if err != nil {
|
||||
bb.Reset()
|
||||
bb.SetString(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
resp.bodyRaw = nil
|
||||
|
||||
oldBody := bb.B
|
||||
bb.B = body
|
||||
return oldBody
|
||||
|
@ -562,7 +535,7 @@ func (req *Request) SwapBody(body []byte) []byte {
|
|||
if req.bodyStream != nil {
|
||||
bb.Reset()
|
||||
_, err := copyZeroAlloc(bb, req.bodyStream)
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
if err != nil {
|
||||
bb.Reset()
|
||||
bb.SetString(err.Error())
|
||||
|
@ -582,7 +555,7 @@ func (req *Request) Body() []byte {
|
|||
bodyBuf := req.bodyBuffer()
|
||||
bodyBuf.Reset()
|
||||
_, err := copyZeroAlloc(bodyBuf, req.bodyStream)
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
if err != nil {
|
||||
bodyBuf.SetString(err.Error())
|
||||
}
|
||||
|
@ -606,8 +579,8 @@ func (req *Request) AppendBody(p []byte) {
|
|||
// AppendBodyString appends s to request body.
|
||||
func (req *Request) AppendBodyString(s string) {
|
||||
req.RemoveMultipartFormFiles()
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.bodyBuffer().WriteString(s) //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
req.bodyBuffer().WriteString(s)
|
||||
}
|
||||
|
||||
// SetBody sets request body.
|
||||
|
@ -620,14 +593,14 @@ func (req *Request) SetBody(body []byte) {
|
|||
// SetBodyString sets request body.
|
||||
func (req *Request) SetBodyString(body string) {
|
||||
req.RemoveMultipartFormFiles()
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
req.bodyBuffer().SetString(body)
|
||||
}
|
||||
|
||||
// ResetBody resets request body.
|
||||
func (req *Request) ResetBody() {
|
||||
req.RemoveMultipartFormFiles()
|
||||
req.closeBodyStream() //nolint:errcheck
|
||||
req.closeBodyStream()
|
||||
if req.body != nil {
|
||||
if req.keepBodyBuffer {
|
||||
req.body.Reset()
|
||||
|
@ -666,12 +639,7 @@ func (req *Request) copyToSkipBody(dst *Request) {
|
|||
// CopyTo copies resp contents to dst except of body stream.
|
||||
func (resp *Response) CopyTo(dst *Response) {
|
||||
resp.copyToSkipBody(dst)
|
||||
if resp.bodyRaw != nil {
|
||||
dst.bodyRaw = resp.bodyRaw
|
||||
if dst.body != nil {
|
||||
dst.body.Reset()
|
||||
}
|
||||
} else if resp.body != nil {
|
||||
if resp.body != nil {
|
||||
dst.bodyBuffer().Set(resp.body.B)
|
||||
} else if dst.body != nil {
|
||||
dst.body.Reset()
|
||||
|
@ -693,7 +661,6 @@ func swapRequestBody(a, b *Request) {
|
|||
|
||||
func swapResponseBody(a, b *Response) {
|
||||
a.body, b.body = b.body, a.body
|
||||
a.bodyRaw, b.bodyRaw = b.bodyRaw, a.bodyRaw
|
||||
a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream
|
||||
}
|
||||
|
||||
|
@ -709,7 +676,7 @@ func (req *Request) parseURI() {
|
|||
}
|
||||
req.parsedURI = true
|
||||
|
||||
req.uri.parse(req.Header.Host(), req.Header.RequestURI(), req.isTLS)
|
||||
req.uri.parseQuick(req.Header.RequestURI(), &req.Header, req.isTLS)
|
||||
}
|
||||
|
||||
// PostArgs returns POST arguments.
|
||||
|
@ -835,7 +802,7 @@ func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize i
|
|||
// in multipart/form-data requests.
|
||||
|
||||
if size <= 0 {
|
||||
return nil, fmt.Errorf("form size must be greater than 0. Given %d", size)
|
||||
panic(fmt.Sprintf("BUG: form size must be greater than 0. Given %d", size))
|
||||
}
|
||||
lr := io.LimitReader(r, int64(size))
|
||||
mr := multipart.NewReader(lr, boundary)
|
||||
|
@ -850,7 +817,6 @@ func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize i
|
|||
func (req *Request) Reset() {
|
||||
req.Header.Reset()
|
||||
req.resetSkipHeader()
|
||||
req.timeout = 0
|
||||
}
|
||||
|
||||
func (req *Request) resetSkipHeader() {
|
||||
|
@ -868,7 +834,7 @@ func (req *Request) RemoveMultipartFormFiles() {
|
|||
if req.multipartForm != nil {
|
||||
// Do not check for error, since these files may be deleted or moved
|
||||
// to new places by user code.
|
||||
req.multipartForm.RemoveAll() //nolint:errcheck
|
||||
req.multipartForm.RemoveAll()
|
||||
req.multipartForm = nil
|
||||
}
|
||||
req.multipartFormBoundary = ""
|
||||
|
@ -881,7 +847,6 @@ func (resp *Response) Reset() {
|
|||
resp.SkipBody = false
|
||||
resp.raddr = nil
|
||||
resp.laddr = nil
|
||||
resp.ImmediateHeaderFlush = false
|
||||
}
|
||||
|
||||
func (resp *Response) resetSkipHeader() {
|
||||
|
@ -933,17 +898,17 @@ var ErrGetOnly = errors.New("non-GET request received")
|
|||
// io.EOF is returned if r is closed before reading the first header byte.
|
||||
func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error {
|
||||
req.resetSkipHeader()
|
||||
if err := req.Header.Read(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return req.readLimitBody(r, maxBodySize, false, true)
|
||||
return req.readLimitBody(r, maxBodySize, false)
|
||||
}
|
||||
|
||||
func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool, preParseMultipartForm bool) error {
|
||||
func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error {
|
||||
// Do not reset the request here - the caller must reset it before
|
||||
// calling this method.
|
||||
|
||||
err := req.Header.Read(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if getOnly && !req.Header.IsGet() {
|
||||
return ErrGetOnly
|
||||
}
|
||||
|
@ -955,7 +920,7 @@ func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool
|
|||
return nil
|
||||
}
|
||||
|
||||
return req.ContinueReadBody(r, maxBodySize, preParseMultipartForm)
|
||||
return req.ContinueReadBody(r, maxBodySize)
|
||||
}
|
||||
|
||||
// MayContinue returns true if the request contains
|
||||
|
@ -979,7 +944,7 @@ func (req *Request) MayContinue() bool {
|
|||
//
|
||||
// If maxBodySize > 0 and the body size exceeds maxBodySize,
|
||||
// then ErrBodyTooLarge is returned.
|
||||
func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int, preParseMultipartForm ...bool) error {
|
||||
func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error {
|
||||
var err error
|
||||
contentLength := req.Header.realContentLength()
|
||||
if contentLength > 0 {
|
||||
|
@ -987,18 +952,16 @@ func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int, preParseM
|
|||
return ErrBodyTooLarge
|
||||
}
|
||||
|
||||
if len(preParseMultipartForm) == 0 || preParseMultipartForm[0] {
|
||||
// Pre-read multipart form data of known length.
|
||||
// This way we limit memory usage for large file uploads, since their contents
|
||||
// is streamed into temporary files if file size exceeds defaultMaxInMemoryFileSize.
|
||||
req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
|
||||
if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 {
|
||||
req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize)
|
||||
if err != nil {
|
||||
req.Reset()
|
||||
}
|
||||
return err
|
||||
// Pre-read multipart form data of known length.
|
||||
// This way we limit memory usage for large file uploads, since their contents
|
||||
// is streamed into temporary files if file size exceeds defaultMaxInMemoryFileSize.
|
||||
req.multipartFormBoundary = string(req.Header.MultipartFormBoundary())
|
||||
if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 {
|
||||
req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize)
|
||||
if err != nil {
|
||||
req.Reset()
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1157,25 +1120,6 @@ func (req *Request) Write(w *bufio.Writer) error {
|
|||
}
|
||||
req.Header.SetHostBytes(host)
|
||||
req.Header.SetRequestURIBytes(uri.RequestURI())
|
||||
|
||||
if len(uri.username) > 0 {
|
||||
// RequestHeader.SetBytesKV only uses RequestHeader.bufKV.key
|
||||
// So we are free to use RequestHeader.bufKV.value as a scratch pad for
|
||||
// the base64 encoding.
|
||||
nl := len(uri.username) + len(uri.password) + 1
|
||||
nb := nl + len(strBasicSpace)
|
||||
tl := nb + base64.StdEncoding.EncodedLen(nl)
|
||||
if tl > cap(req.Header.bufKV.value) {
|
||||
req.Header.bufKV.value = make([]byte, 0, tl)
|
||||
}
|
||||
buf := req.Header.bufKV.value[:0]
|
||||
buf = append(buf, uri.username...)
|
||||
buf = append(buf, strColon...)
|
||||
buf = append(buf, uri.password...)
|
||||
buf = append(buf, strBasicSpace...)
|
||||
base64.StdEncoding.Encode(buf[nb:tl], buf[:nl])
|
||||
req.Header.SetBytesKV(strAuthorization, buf[nl:tl])
|
||||
}
|
||||
}
|
||||
|
||||
if req.bodyStream != nil {
|
||||
|
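The block removed above exists only in the newer fasthttp: it base64-encodes uri.username and uri.password into an Authorization header while writing the request. A standalone sketch of the same "Basic" scheme encoding using only the standard library; function and variable names are illustrative.

package main

import (
	"encoding/base64"
	"fmt"
)

// basicAuthHeader returns the value of an Authorization header for the
// "Basic" scheme described in RFC 7617: "Basic " + base64("user:password").
func basicAuthHeader(user, password string) string {
	credentials := user + ":" + password
	return "Basic " + base64.StdEncoding.EncodeToString([]byte(credentials))
}

func main() {
	fmt.Println(basicAuthHeader("alice", "secret")) // Basic YWxpY2U6c2VjcmV0
}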
@ -1192,12 +1136,11 @@ func (req *Request) Write(w *bufio.Writer) error {
|
|||
req.Header.SetMultipartFormBoundary(req.multipartFormBoundary)
|
||||
}
|
||||
|
||||
hasBody := false
|
||||
if len(body) == 0 {
|
||||
body = req.postArgs.QueryString()
|
||||
}
|
||||
if len(body) != 0 || !req.Header.ignoreBody() {
|
||||
hasBody = true
|
||||
hasBody := !req.Header.ignoreBody()
|
||||
if hasBody {
|
||||
if len(body) == 0 {
|
||||
body = req.postArgs.QueryString()
|
||||
}
|
||||
req.Header.SetContentLength(len(body))
|
||||
}
|
||||
if err = req.Header.Write(w); err != nil {
|
||||
|
@ -1300,7 +1243,7 @@ func (resp *Response) gzipBody(level int) error {
|
|||
wf: zw,
|
||||
bw: sw,
|
||||
}
|
||||
copyZeroAlloc(fw, bs) //nolint:errcheck
|
||||
copyZeroAlloc(fw, bs)
|
||||
releaseStacklessGzipWriter(zw, level)
|
||||
if bsc, ok := bs.(io.Closer); ok {
|
||||
bsc.Close()
|
||||
|
@ -1322,7 +1265,6 @@ func (resp *Response) gzipBody(level int) error {
|
|||
responseBodyPool.Put(resp.body)
|
||||
}
|
||||
resp.body = w
|
||||
resp.bodyRaw = nil
|
||||
}
|
||||
resp.Header.SetCanonical(strContentEncoding, strGzip)
|
||||
return nil
|
||||
|
@ -1355,7 +1297,7 @@ func (resp *Response) deflateBody(level int) error {
|
|||
wf: zw,
|
||||
bw: sw,
|
||||
}
|
||||
copyZeroAlloc(fw, bs) //nolint:errcheck
|
||||
copyZeroAlloc(fw, bs)
|
||||
releaseStacklessDeflateWriter(zw, level)
|
||||
if bsc, ok := bs.(io.Closer); ok {
|
||||
bsc.Close()
|
||||
|
@ -1377,7 +1319,6 @@ func (resp *Response) deflateBody(level int) error {
|
|||
responseBodyPool.Put(resp.body)
|
||||
}
|
||||
resp.body = w
|
||||
resp.bodyRaw = nil
|
||||
}
|
||||
resp.Header.SetCanonical(strContentEncoding, strDeflate)
|
||||
return nil
|
||||
|
@ -1471,19 +1412,8 @@ func (req *Request) writeBodyStream(w *bufio.Writer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// ErrBodyStreamWritePanic is returned when panic happens during writing body stream.
|
||||
type ErrBodyStreamWritePanic struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = &ErrBodyStreamWritePanic{
|
||||
error: fmt.Errorf("panic while writing body stream: %+v", r),
|
||||
}
|
||||
}
|
||||
}()
|
||||
func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) error {
|
||||
var err error
|
||||
|
||||
contentLength := resp.Header.ContentLength()
|
||||
if contentLength < 0 {
|
||||
|
@ -1500,22 +1430,12 @@ func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) (err error
|
|||
}
|
||||
if contentLength >= 0 {
|
||||
if err = resp.Header.Write(w); err == nil && sendBody {
|
||||
if resp.ImmediateHeaderFlush {
|
||||
err = w.Flush()
|
||||
}
|
||||
if err == nil {
|
||||
err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength))
|
||||
}
|
||||
err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength))
|
||||
}
|
||||
} else {
|
||||
resp.Header.SetContentLength(-1)
|
||||
if err = resp.Header.Write(w); err == nil && sendBody {
|
||||
if resp.ImmediateHeaderFlush {
|
||||
err = w.Flush()
|
||||
}
|
||||
if err == nil {
|
||||
err = writeBodyChunked(w, resp.bodyStream)
|
||||
}
|
||||
err = writeBodyChunked(w, resp.bodyStream)
|
||||
}
|
||||
}
|
||||
err1 := resp.closeBodyStream()
|
||||
|
@ -1666,15 +1586,9 @@ var copyBufPool = sync.Pool{
|
|||
|
||||
func writeChunk(w *bufio.Writer, b []byte) error {
|
||||
n := len(b)
|
||||
if err := writeHexInt(w, n); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(strCRLF); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
writeHexInt(w, n)
|
||||
w.Write(strCRLF)
|
||||
w.Write(b)
|
||||
_, err := w.Write(strCRLF)
|
||||
err1 := w.Flush()
|
||||
if err == nil {
|
||||
|
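writeChunk frames one piece of an HTTP/1.1 chunked body: the payload length in hex, CRLF, the payload, CRLF, then a flush; the downgrade swaps the checked writes for unchecked ones. A self-contained sketch of the same framing against a bufio.Writer, keeping the checked style (names are illustrative).

package main

import (
	"bufio"
	"log"
	"os"
	"strconv"
)

// writeChunk writes a single chunk of an HTTP/1.1 chunked body:
// "<length-in-hex>\r\n<payload>\r\n", then flushes the writer.
func writeChunk(w *bufio.Writer, b []byte) error {
	if _, err := w.WriteString(strconv.FormatInt(int64(len(b)), 16)); err != nil {
		return err
	}
	if _, err := w.WriteString("\r\n"); err != nil {
		return err
	}
	if _, err := w.Write(b); err != nil {
		return err
	}
	if _, err := w.WriteString("\r\n"); err != nil {
		return err
	}
	return w.Flush()
}

func main() {
	w := bufio.NewWriter(os.Stdout)
	if err := writeChunk(w, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	// A zero-length chunk terminates the chunked body.
	if err := writeChunk(w, nil); err != nil {
		log.Fatal(err)
	}
}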
|
44
vendor/github.com/valyala/fasthttp/lbclient.go
generated
vendored
|
@ -17,7 +17,7 @@ type BalancingClient interface {
|
|||
//
|
||||
// It has the following features:
|
||||
//
|
||||
// - Balances load among available clients using 'least loaded' + 'least total'
|
||||
// - Balances load among available clients using 'least loaded' + 'round robin'
|
||||
// hybrid technique.
|
||||
// - Dynamically decreases load on unhealthy clients.
|
||||
//
|
||||
|
@ -25,7 +25,7 @@ type BalancingClient interface {
|
|||
//
|
||||
// It is safe calling LBClient methods from concurrently running goroutines.
|
||||
type LBClient struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Clients must contain non-zero clients list.
|
||||
// Incoming requests are balanced among these clients.
|
||||
|
@ -49,6 +49,10 @@ type LBClient struct {
|
|||
|
||||
cs []*lbClient
|
||||
|
||||
// nextIdx is for spreading requests among equally loaded clients
|
||||
// in a round-robin fashion.
|
||||
nextIdx uint32
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
|
@ -89,23 +93,42 @@ func (cc *LBClient) init() {
|
|||
healthCheck: cc.HealthCheck,
|
||||
})
|
||||
}
|
||||
|
||||
// Randomize nextIdx in order to prevent initial servers'
|
||||
// hammering from a cluster of identical LBClients.
|
||||
cc.nextIdx = uint32(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func (cc *LBClient) get() *lbClient {
|
||||
cc.once.Do(cc.init)
|
||||
|
||||
cs := cc.cs
|
||||
idx := atomic.AddUint32(&cc.nextIdx, 1)
|
||||
idx %= uint32(len(cs))
|
||||
|
||||
minC := cs[0]
|
||||
minC := cs[idx]
|
||||
minN := minC.PendingRequests()
|
||||
minT := atomic.LoadUint64(&minC.total)
|
||||
for _, c := range cs[1:] {
|
||||
if minN == 0 {
|
||||
return minC
|
||||
}
|
||||
for _, c := range cs[idx+1:] {
|
||||
n := c.PendingRequests()
|
||||
t := atomic.LoadUint64(&c.total)
|
||||
if n < minN || (n == minN && t < minT) {
|
||||
if n == 0 {
|
||||
return c
|
||||
}
|
||||
if n < minN {
|
||||
minC = c
|
||||
minN = n
|
||||
}
|
||||
}
|
||||
for _, c := range cs[:idx] {
|
||||
n := c.PendingRequests()
|
||||
if n == 0 {
|
||||
return c
|
||||
}
|
||||
if n < minN {
|
||||
minC = c
|
||||
minN = n
|
||||
minT = t
|
||||
}
|
||||
}
|
||||
return minC
|
||||
|
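The rewritten get() above restores the older selection strategy: a randomized, wrapping index spreads requests round-robin, any idle client (zero pending requests) wins immediately, otherwise the lowest pending count does. A standalone sketch of that selection with a trivial stand-in for BalancingClient; all names here are illustrative.

package main

import (
	"fmt"
	"sync/atomic"
)

// fakeClient stands in for fasthttp.BalancingClient in this sketch.
type fakeClient struct {
	name    string
	pending int32
}

func (c *fakeClient) PendingRequests() int { return int(atomic.LoadInt32(&c.pending)) }

// pick implements "least loaded + round robin": start at a rotating index,
// return the first idle client, otherwise the one with the fewest pending
// requests, scanning from idx+1 to the end first and the head up to idx second.
func pick(clients []*fakeClient, nextIdx *uint32) *fakeClient {
	idx := int(atomic.AddUint32(nextIdx, 1) % uint32(len(clients)))

	minC := clients[idx]
	minN := minC.PendingRequests()
	if minN == 0 {
		return minC
	}
	scan := func(cs []*fakeClient) *fakeClient {
		for _, c := range cs {
			n := c.PendingRequests()
			if n == 0 {
				return c
			}
			if n < minN {
				minC, minN = c, n
			}
		}
		return nil
	}
	if c := scan(clients[idx+1:]); c != nil {
		return c
	}
	if c := scan(clients[:idx]); c != nil {
		return c
	}
	return minC
}

func main() {
	clients := []*fakeClient{
		{name: "a", pending: 2},
		{name: "b", pending: 1},
		{name: "c", pending: 3},
	}
	var nextIdx uint32
	for i := 0; i < 4; i++ {
		fmt.Println("picked:", pick(clients, &nextIdx).name)
	}
}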
@ -115,9 +138,6 @@ type lbClient struct {
|
|||
c BalancingClient
|
||||
healthCheck func(req *Request, resp *Response, err error) bool
|
||||
penalty uint32
|
||||
|
||||
// total amount of requests handled.
|
||||
total uint64
|
||||
}
|
||||
|
||||
func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
|
||||
|
@ -126,8 +146,6 @@ func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time)
|
|||
// Penalize the client returning error, so the next requests
|
||||
// are routed to another clients.
|
||||
time.AfterFunc(penaltyDuration, c.decPenalty)
|
||||
} else {
|
||||
atomic.AddUint64(&c.total, 1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
14
vendor/github.com/valyala/fasthttp/methods.go
generated
vendored
|
@@ -1,14 +0,0 @@
-package fasthttp
-
-// HTTP methods were copied from net/http.
-const (
-	MethodGet     = "GET"     // RFC 7231, 4.3.1
-	MethodHead    = "HEAD"    // RFC 7231, 4.3.2
-	MethodPost    = "POST"    // RFC 7231, 4.3.3
-	MethodPut     = "PUT"     // RFC 7231, 4.3.4
-	MethodPatch   = "PATCH"   // RFC 5789
-	MethodDelete  = "DELETE"  // RFC 7231, 4.3.5
-	MethodConnect = "CONNECT" // RFC 7231, 4.3.6
-	MethodOptions = "OPTIONS" // RFC 7231, 4.3.7
-	MethodTrace   = "TRACE"   // RFC 7231, 4.3.8
-)
|
2
vendor/github.com/valyala/fasthttp/nocopy.go
generated
vendored
|
@@ -5,7 +5,7 @@ package fasthttp
 //
 // See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details.
 // and also: https://stackoverflow.com/questions/52494458/nocopy-minimal-example
-type noCopy struct{} //nolint:unused
+type noCopy struct{}
 
 func (*noCopy) Lock()   {}
 func (*noCopy) Unlock() {}
|
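The nocopy.go change only drops the //nolint:unused annotation; the trick itself is unchanged. Embedding a zero-sized type with pointer Lock/Unlock methods makes go vet's copylocks check flag accidental copies of structs such as Server, RequestCtx and LBClient. A minimal illustration under that assumption; connPool is a hypothetical struct.

package main

// noCopy may be embedded into structs which must not be copied after first
// use. It is zero-sized; go vet's copylocks check treats any value with
// pointer Lock/Unlock methods as a lock and reports copies of it.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// connPool is a hypothetical struct that must not be copied by value.
type connPool struct {
	noCopy noCopy //nolint:unused

	name string
}

func use(p *connPool) { _ = p }

func main() {
	p := connPool{name: "primary"}
	use(&p)
	// q := p // `go vet -copylocks` would report: assignment copies lock value
}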
|
554
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
|
@@ -17,7 +17,7 @@ import (
 	"time"
 )
 
-var errNoCertOrKeyProvided = errors.New("cert or key has not provided")
+var errNoCertOrKeyProvided = errors.New("Cert or key has not provided")
 
 var (
 	// ErrAlreadyServing is returned when calling Serve on a Server
|
@ -148,7 +148,7 @@ type ServeHandler func(c net.Conn) error
|
|||
//
|
||||
// It is safe to call Server methods from concurrently running goroutines.
|
||||
type Server struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Handler for processing incoming requests.
|
||||
//
|
||||
|
@ -167,11 +167,6 @@ type Server struct {
|
|||
// * ErrBrokenChunks
|
||||
ErrorHandler func(ctx *RequestCtx, err error)
|
||||
|
||||
// HeaderReceived is called after receiving the header
|
||||
//
|
||||
// non zero RequestConfig field values will overwrite the default configs
|
||||
HeaderReceived func(header *RequestHeader) RequestConfig
|
||||
|
||||
// Server name for sending in response headers.
|
||||
//
|
||||
// Default server name is used if left blank.
|
||||
|
@ -180,9 +175,6 @@ type Server struct {
|
|||
// The maximum number of concurrent connections the server may serve.
|
||||
//
|
||||
// DefaultConcurrency is used if not set.
|
||||
//
|
||||
// Concurrency only works if you either call Serve once, or only ServeConn multiple times.
|
||||
// It works with ListenAndServe as well.
|
||||
Concurrency int
|
||||
|
||||
// Whether to disable keep-alive connections.
|
||||
|
@ -207,26 +199,19 @@ type Server struct {
|
|||
// Default buffer size is used if not set.
|
||||
WriteBufferSize int
|
||||
|
||||
// ReadTimeout is the amount of time allowed to read
|
||||
// the full request including body. The connection's read
|
||||
// deadline is reset when the connection opens, or for
|
||||
// keep-alive connections after the first byte has been read.
|
||||
// Maximum duration for reading the full request (including body).
|
||||
//
|
||||
// This also limits the maximum duration for idle keep-alive
|
||||
// connections.
|
||||
//
|
||||
// By default request read timeout is unlimited.
|
||||
ReadTimeout time.Duration
|
||||
|
||||
// WriteTimeout is the maximum duration before timing out
|
||||
// writes of the response. It is reset after the request handler
|
||||
// has returned.
|
||||
// Maximum duration for writing the full response (including body).
|
||||
//
|
||||
// By default response write timeout is unlimited.
|
||||
WriteTimeout time.Duration
|
||||
|
||||
// IdleTimeout is the maximum amount of time to wait for the
|
||||
// next request when keep-alive is enabled. If IdleTimeout
|
||||
// is zero, the value of ReadTimeout is used.
|
||||
IdleTimeout time.Duration
|
||||
|
||||
// Maximum number of concurrent client connections allowed per IP.
|
||||
//
|
||||
// By default unlimited number of concurrent connections
|
||||
|
@ -241,8 +226,15 @@ type Server struct {
|
|||
// By default unlimited number of requests may be served per connection.
|
||||
MaxRequestsPerConn int
|
||||
|
||||
// MaxKeepaliveDuration is a no-op and only left here for backwards compatibility.
|
||||
// Deprecated: Use IdleTimeout instead.
|
||||
// Maximum keep-alive connection lifetime.
|
||||
//
|
||||
// The server closes keep-alive connection after its' lifetime
|
||||
// expiration.
|
||||
//
|
||||
// See also ReadTimeout for limiting the duration of idle keep-alive
|
||||
// connections.
|
||||
//
|
||||
// By default keep-alive connection lifetime is unlimited.
|
||||
MaxKeepaliveDuration time.Duration
|
||||
|
||||
// Whether to enable tcp keep-alive connections.
|
||||
|
@ -283,14 +275,6 @@ type Server struct {
|
|||
// Server accepts all the requests by default.
|
||||
GetOnly bool
|
||||
|
||||
// Will not pre parse Multipart Form data if set to true.
|
||||
//
|
||||
// This option is useful for servers that desire to treat
|
||||
// multipart form data as a binary blob, or choose when to parse the data.
|
||||
//
|
||||
// Server pre parses multipart form data by default.
|
||||
DisablePreParseMultipartForm bool
|
||||
|
||||
// Logs all errors, including the most frequent
|
||||
// 'connection reset by peer', 'broken pipe' and 'connection timeout'
|
||||
// errors. Such errors are common in production serving real-world
|
||||
|
@ -333,13 +317,6 @@ type Server struct {
|
|||
// value is explicitly provided during a request.
|
||||
NoDefaultServerHeader bool
|
||||
|
||||
// NoDefaultDate, when set to true, causes the default Date
|
||||
// header to be excluded from the Response.
|
||||
//
|
||||
// The default Date header value is the current date value. When
|
||||
// set to true, the Date will not be present.
|
||||
NoDefaultDate bool
|
||||
|
||||
// NoDefaultContentType, when set to true, causes the default Content-Type
|
||||
// header to be excluded from the Response.
|
||||
//
|
||||
|
@ -357,13 +334,6 @@ type Server struct {
|
|||
// By default standard logger from log package is used.
|
||||
Logger Logger
|
||||
|
||||
// KeepHijackedConns is an opt-in disable of connection
|
||||
// close by fasthttp after connections' HijackHandler returns.
|
||||
// This allows to save goroutines, e.g. when fasthttp used to upgrade
|
||||
// http connections to WS and connection goes to another handler,
|
||||
// which will close it when needed.
|
||||
KeepHijackedConns bool
|
||||
|
||||
tlsConfig *tls.Config
|
||||
nextProtos map[string]ServeHandler
|
||||
|
||||
|
@ -376,9 +346,10 @@ type Server struct {
|
|||
readerPool sync.Pool
|
||||
writerPool sync.Pool
|
||||
hijackConnPool sync.Pool
|
||||
bytePool sync.Pool
|
||||
|
||||
// We need to know our listeners so we can close them in Shutdown().
|
||||
ln []net.Listener
|
||||
// We need to know our listener so we can close it in Shutdown().
|
||||
ln net.Listener
|
||||
|
||||
mu sync.Mutex
|
||||
open int32
|
||||
|
@ -394,17 +365,6 @@ type Server struct {
|
|||
// msg to the client if there are more than Server.Concurrency concurrent
|
||||
// handlers h are running at the moment.
|
||||
func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler {
|
||||
return TimeoutWithCodeHandler(h, timeout, msg, StatusRequestTimeout)
|
||||
}
|
||||
|
||||
// TimeoutWithCodeHandler creates RequestHandler, which returns an error with
|
||||
// the given msg and status code to the client if h didn't return during
|
||||
// the given duration.
|
||||
//
|
||||
// The returned handler may return StatusTooManyRequests error with the given
|
||||
// msg to the client if there are more than Server.Concurrency concurrent
|
||||
// handlers h are running at the moment.
|
||||
func TimeoutWithCodeHandler(h RequestHandler, timeout time.Duration, msg string, statusCode int) RequestHandler {
|
||||
if timeout <= 0 {
|
||||
return h
|
||||
}
|
||||
|
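TimeoutWithCodeHandler exists only in the newer fasthttp; the downgraded code keeps plain TimeoutHandler, which replies with a request-timeout error carrying the given message when the wrapped handler overruns the deadline. A hedged usage sketch; the listen address and durations are arbitrary.

package main

import (
	"log"
	"time"

	"github.com/valyala/fasthttp"
)

func slowHandler(ctx *fasthttp.RequestCtx) {
	time.Sleep(2 * time.Second) // simulate a slow upstream call
	ctx.SetBodyString("done")
}

func main() {
	// Requests that take longer than one second receive the timeout message
	// instead of the handler's response.
	h := fasthttp.TimeoutHandler(slowHandler, time.Second, "request timed out")
	log.Fatal(fasthttp.ListenAndServe(":8080", h))
}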
@ -432,27 +392,12 @@ func TimeoutWithCodeHandler(h RequestHandler, timeout time.Duration, msg string,
|
|||
select {
|
||||
case <-ch:
|
||||
case <-ctx.timeoutTimer.C:
|
||||
ctx.TimeoutErrorWithCode(msg, statusCode)
|
||||
ctx.TimeoutError(msg)
|
||||
}
|
||||
stopTimer(ctx.timeoutTimer)
|
||||
}
|
||||
}
|
||||
|
||||
//RequestConfig configure the per request deadline and body limits
|
||||
type RequestConfig struct {
|
||||
// ReadTimeout is the maximum duration for reading the entire
|
||||
// request body.
|
||||
// a zero value means that default values will be honored
|
||||
ReadTimeout time.Duration
|
||||
// WriteTimeout is the maximum duration before timing out
|
||||
// writes of the response.
|
||||
// a zero value means that default values will be honored
|
||||
WriteTimeout time.Duration
|
||||
// Maximum request body size.
|
||||
// a zero value means that default values will be honored
|
||||
MaxRequestBodySize int
|
||||
}
|
||||
|
||||
// CompressHandler returns RequestHandler that transparently compresses
|
||||
// response body generated by h if the request contains 'gzip' or 'deflate'
|
||||
// 'Accept-Encoding' header.
|
||||
|
@ -475,9 +420,9 @@ func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
|
|||
return func(ctx *RequestCtx) {
|
||||
h(ctx)
|
||||
if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
|
||||
ctx.Response.gzipBody(level) //nolint:errcheck
|
||||
ctx.Response.gzipBody(level)
|
||||
} else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) {
|
||||
ctx.Response.deflateBody(level) //nolint:errcheck
|
||||
ctx.Response.deflateBody(level)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
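CompressHandlerLevel runs the wrapped handler first and then gzips or deflates the response body according to the client's Accept-Encoding header; the downgrade only removes the //nolint comments. A hedged sketch of the exported CompressHandler wrapper (address is arbitrary).

package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func hello(ctx *fasthttp.RequestCtx) {
	ctx.SetContentType("text/plain; charset=utf-8")
	ctx.SetBodyString("hello, compressed world")
}

func main() {
	// Clients sending "Accept-Encoding: gzip" (or deflate) get a compressed
	// body; everybody else gets the plain response from hello.
	log.Fatal(fasthttp.ListenAndServe(":8080", fasthttp.CompressHandler(hello)))
}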
@ -497,7 +442,7 @@ func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
|
|||
// running goroutines. The only exception is TimeoutError*, which may be called
|
||||
// while other goroutines accessing RequestCtx.
|
||||
type RequestCtx struct {
|
||||
noCopy noCopy //nolint:unused,structcheck
|
||||
noCopy noCopy
|
||||
|
||||
// Incoming request.
|
||||
//
|
||||
|
@ -511,6 +456,8 @@ type RequestCtx struct {
|
|||
|
||||
userValues userData
|
||||
|
||||
lastReadDuration time.Duration
|
||||
|
||||
connID uint64
|
||||
connRequestNum uint64
|
||||
connTime time.Time
|
||||
|
@ -526,19 +473,14 @@ type RequestCtx struct {
|
|||
timeoutCh chan struct{}
|
||||
timeoutTimer *time.Timer
|
||||
|
||||
hijackHandler HijackHandler
|
||||
hijackNoResponse bool
|
||||
hijackHandler HijackHandler
|
||||
}
|
||||
|
||||
// HijackHandler must process the hijacked connection c.
|
||||
//
|
||||
// If KeepHijackedConns is disabled, which is by default,
|
||||
// the connection c is automatically closed after returning from HijackHandler.
|
||||
// The connection c is automatically closed after returning from HijackHandler.
|
||||
//
|
||||
// The connection c must not be used after returning from the handler, if KeepHijackedConns is disabled.
|
||||
//
|
||||
// When KeepHijackedConns enabled, fasthttp will not Close() the connection,
|
||||
// you must do it when you need it. You must not use c in any way after calling Close().
|
||||
// The connection c must not be used after returning from the handler.
|
||||
type HijackHandler func(c net.Conn)
|
||||
|
||||
// Hijack registers the given handler for connection hijacking.
|
||||
|
@ -554,7 +496,6 @@ type HijackHandler func(c net.Conn)
|
|||
// * Unexpected error during response writing to the connection.
|
||||
//
|
||||
// The server stops processing requests from hijacked connections.
|
||||
//
|
||||
// Server limits such as Concurrency, ReadTimeout, WriteTimeout, etc.
|
||||
// aren't applied to hijacked connections.
|
||||
//
|
||||
|
@ -570,15 +511,6 @@ func (ctx *RequestCtx) Hijack(handler HijackHandler) {
|
|||
ctx.hijackHandler = handler
|
||||
}
|
||||
|
||||
// HijackSetNoResponse changes the behavior of hijacking a request.
|
||||
// If HijackSetNoResponse is called with false fasthttp will send a response
|
||||
// to the client before calling the HijackHandler (default). If HijackSetNoResponse
|
||||
// is called with true no response is send back before calling the
|
||||
// HijackHandler supplied in the Hijack function.
|
||||
func (ctx *RequestCtx) HijackSetNoResponse(noResponse bool) {
|
||||
ctx.hijackNoResponse = noResponse
|
||||
}
|
||||
|
||||
// Hijacked returns true after Hijack is called.
|
||||
func (ctx *RequestCtx) Hijacked() bool {
|
||||
return ctx.hijackHandler != nil
|
||||
|
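HijackSetNoResponse is another newer-only API dropped here: in the downgraded code the normal response is always written before the hijack handler runs, and the connection is always closed when the handler returns. A hedged sketch of the surviving Hijack API.

package main

import (
	"log"
	"net"

	"github.com/valyala/fasthttp"
)

func handler(ctx *fasthttp.RequestCtx) {
	ctx.SetBodyString("switching to raw TCP")
	// The hijack handler runs after the response above has been written;
	// the connection is closed automatically when it returns.
	ctx.Hijack(func(c net.Conn) {
		_, _ = c.Write([]byte("raw bytes outside HTTP\n"))
	})
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}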
@ -714,9 +646,10 @@ type ctxLogger struct {
|
|||
}
|
||||
|
||||
func (cl *ctxLogger) Printf(format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
ctxLoggerLock.Lock()
|
||||
cl.logger.Printf("%.3f %s - %s", time.Since(cl.ctx.ConnTime()).Seconds(), cl.ctx.String(), msg)
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
ctx := cl.ctx
|
||||
cl.logger.Printf("%.3f %s - %s", time.Since(ctx.Time()).Seconds(), ctx.String(), msg)
|
||||
ctxLoggerLock.Unlock()
|
||||
}
|
||||
|
||||
|
@ -894,21 +827,16 @@ func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) {
|
|||
var ErrMissingFile = errors.New("there is no uploaded file associated with the given key")
|
||||
|
||||
// SaveMultipartFile saves multipart file fh under the given filename path.
|
||||
func SaveMultipartFile(fh *multipart.FileHeader, path string) (err error) {
|
||||
var (
|
||||
f multipart.File
|
||||
ff *os.File
|
||||
)
|
||||
f, err = fh.Open()
|
||||
func SaveMultipartFile(fh *multipart.FileHeader, path string) error {
|
||||
f, err := fh.Open()
|
||||
if err != nil {
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if ff, ok = f.(*os.File); ok {
|
||||
if ff, ok := f.(*os.File); ok {
|
||||
// Windows can't rename files that are opened.
|
||||
if err = f.Close(); err != nil {
|
||||
return
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If renaming fails we try the normal copying method.
|
||||
|
@ -918,29 +846,21 @@ func SaveMultipartFile(fh *multipart.FileHeader, path string) (err error) {
|
|||
}
|
||||
|
||||
// Reopen f for the code below.
|
||||
if f, err = fh.Open(); err != nil {
|
||||
return
|
||||
f, err = fh.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := f.Close()
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
defer f.Close()
|
||||
|
||||
if ff, err = os.Create(path); err != nil {
|
||||
return
|
||||
ff, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
e := ff.Close()
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
defer ff.Close()
|
||||
_, err = copyZeroAlloc(ff, f)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// FormValue returns form value associated with the given key.
|
||||
|
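SaveMultipartFile is rewritten back to the simpler, older form that ignores Close errors via plain defers; the rename-then-copy fallback for already-open files stays. A hedged handler sketch combining ctx.FormFile with SaveMultipartFile; the form field name and target directory are illustrative.

package main

import (
	"log"
	"path/filepath"

	"github.com/valyala/fasthttp"
)

func upload(ctx *fasthttp.RequestCtx) {
	// "file" is whatever multipart field name the client used; illustrative.
	fh, err := ctx.FormFile("file")
	if err != nil {
		ctx.Error("missing file field", fasthttp.StatusBadRequest)
		return
	}
	dst := filepath.Join("/tmp", filepath.Base(fh.Filename))
	if err := fasthttp.SaveMultipartFile(fh, dst); err != nil {
		ctx.Error("cannot save file", fasthttp.StatusInternalServerError)
		return
	}
	ctx.SetBodyString("saved to " + dst)
}

func main() {
	log.Fatal(fasthttp.ListenAndServe(":8080", upload))
}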
@ -1081,8 +1001,6 @@ func addrToIP(addr net.Addr) net.IP {
|
|||
|
||||
// Error sets response status code to the given value and sets response body
|
||||
// to the given message.
|
||||
//
|
||||
// Warning: this will reset the response headers and body already set!
|
||||
func (ctx *RequestCtx) Error(msg string, statusCode int) {
|
||||
ctx.Response.Reset()
|
||||
ctx.SetStatusCode(statusCode)
|
||||
|
@ -1399,15 +1317,9 @@ func (ln tcpKeepaliveListener) Accept() (net.Conn, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tc.SetKeepAlive(true); err != nil {
|
||||
tc.Close() //nolint:errcheck
|
||||
return nil, err
|
||||
}
|
||||
tc.SetKeepAlive(true)
|
||||
if ln.keepalivePeriod > 0 {
|
||||
if err := tc.SetKeepAlivePeriod(ln.keepalivePeriod); err != nil {
|
||||
tc.Close() //nolint:errcheck
|
||||
return nil, err
|
||||
}
|
||||
tc.SetKeepAlivePeriod(ln.keepalivePeriod)
|
||||
}
|
||||
return tc, nil
|
||||
}
|
||||
|
@ -1608,21 +1520,20 @@ func (s *Server) Serve(ln net.Listener) error {
|
|||
var c net.Conn
|
||||
var err error
|
||||
|
||||
maxWorkersCount := s.getConcurrency()
|
||||
|
||||
s.mu.Lock()
|
||||
{
|
||||
s.ln = append(s.ln, ln)
|
||||
if s.done == nil {
|
||||
s.done = make(chan struct{})
|
||||
if s.ln != nil {
|
||||
s.mu.Unlock()
|
||||
return ErrAlreadyServing
|
||||
}
|
||||
|
||||
if s.concurrencyCh == nil {
|
||||
s.concurrencyCh = make(chan struct{}, maxWorkersCount)
|
||||
}
|
||||
s.ln = ln
|
||||
s.done = make(chan struct{})
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
maxWorkersCount := s.getConcurrency()
|
||||
s.concurrencyCh = make(chan struct{}, maxWorkersCount)
|
||||
wp := &workerPool{
|
||||
WorkerFunc: s.serveConn,
|
||||
MaxWorkersCount: maxWorkersCount,
|
||||
|
@ -1695,10 +1606,8 @@ func (s *Server) Shutdown() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
for _, ln := range s.ln {
|
||||
if err := ln.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.ln.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.done != nil {
|
||||
|
@ -1718,7 +1627,6 @@ func (s *Server) Shutdown() error {
|
|||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
|
||||
s.done = nil
|
||||
s.ln = nil
|
||||
return nil
|
||||
}
|
||||
|
@ -1792,6 +1700,10 @@ var (
|
|||
// ErrConcurrencyLimit may be returned from ServeConn if the number
|
||||
// of concurrently served connections exceeds Server.Concurrency.
|
||||
ErrConcurrencyLimit = errors.New("cannot serve the connection because Server.Concurrency concurrent connections are served")
|
||||
|
||||
// ErrKeepaliveTimeout is returned from ServeConn
|
||||
// if the connection lifetime exceeds MaxKeepaliveDuration.
|
||||
ErrKeepaliveTimeout = errors.New("exceeded MaxKeepaliveDuration")
|
||||
)
|
||||
|
||||
// ServeConn serves HTTP requests from the given connection.
|
||||
|
@ -1853,15 +1765,7 @@ func (s *Server) GetCurrentConcurrency() uint32 {
|
|||
//
|
||||
// This function is intended be used by monitoring systems
|
||||
func (s *Server) GetOpenConnectionsCount() int32 {
|
||||
if atomic.LoadInt32(&s.stop) == 0 {
|
||||
// Decrement by one to avoid reporting the extra open value that gets
|
||||
// counted while the server is listening.
|
||||
return atomic.LoadInt32(&s.open) - 1
|
||||
}
|
||||
// This is not perfect, because s.stop could have changed to zero
|
||||
// before we load the value of s.open. However, in the common case
|
||||
// this avoids underreporting open connections by 1 during server shutdown.
|
||||
return atomic.LoadInt32(&s.open)
|
||||
return atomic.LoadInt32(&s.open) - 1
|
||||
}
|
||||
|
||||
func (s *Server) getConcurrency() int {
|
||||
|
@ -1884,28 +1788,16 @@ func nextConnID() uint64 {
|
|||
// See Server.MaxRequestBodySize for details.
|
||||
const DefaultMaxRequestBodySize = 4 * 1024 * 1024
|
||||
|
||||
func (s *Server) idleTimeout() time.Duration {
|
||||
if s.IdleTimeout != 0 {
|
||||
return s.IdleTimeout
|
||||
}
|
||||
return s.ReadTimeout
|
||||
}
|
||||
func (s *Server) serveConn(c net.Conn) error {
|
||||
defer atomic.AddInt32(&s.open, -1)
|
||||
|
||||
func (s *Server) serveConnCleanup() {
|
||||
atomic.AddInt32(&s.open, -1)
|
||||
atomic.AddUint32(&s.concurrency, ^uint32(0))
|
||||
}
|
||||
|
||||
func (s *Server) serveConn(c net.Conn) (err error) {
|
||||
defer s.serveConnCleanup()
|
||||
atomic.AddUint32(&s.concurrency, 1)
|
||||
|
||||
var proto string
|
||||
if proto, err = s.getNextProto(c); err != nil {
|
||||
return
|
||||
}
|
||||
if handler, ok := s.nextProtos[proto]; ok {
|
||||
return handler(c)
|
||||
if proto, err := s.getNextProto(c); err != nil {
|
||||
return err
|
||||
} else {
|
||||
handler, ok := s.nextProtos[proto]
|
||||
if ok {
|
||||
return handler(c)
|
||||
}
|
||||
}
|
||||
|
||||
var serverName []byte
|
||||
|
@ -1914,12 +1806,12 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
}
|
||||
connRequestNum := uint64(0)
|
||||
connID := nextConnID()
|
||||
connTime := time.Now()
|
||||
currentTime := time.Now()
|
||||
connTime := currentTime
|
||||
maxRequestBodySize := s.MaxRequestBodySize
|
||||
if maxRequestBodySize <= 0 {
|
||||
maxRequestBodySize = DefaultMaxRequestBodySize
|
||||
}
|
||||
writeTimeout := s.WriteTimeout
|
||||
|
||||
ctx := s.acquireCtx(c)
|
||||
ctx.connTime = connTime
|
||||
|
@ -1928,113 +1820,69 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
br *bufio.Reader
|
||||
bw *bufio.Writer
|
||||
|
||||
timeoutResponse *Response
|
||||
hijackHandler HijackHandler
|
||||
hijackNoResponse bool
|
||||
err error
|
||||
timeoutResponse *Response
|
||||
hijackHandler HijackHandler
|
||||
|
||||
lastReadDeadlineTime time.Time
|
||||
lastWriteDeadlineTime time.Time
|
||||
|
||||
connectionClose bool
|
||||
isHTTP11 bool
|
||||
|
||||
reqReset bool
|
||||
)
|
||||
for {
|
||||
connRequestNum++
|
||||
ctx.time = currentTime
|
||||
|
||||
// If this is a keep-alive connection set the idle timeout.
|
||||
if connRequestNum > 1 {
|
||||
if d := s.idleTimeout(); d > 0 {
|
||||
if err := c.SetReadDeadline(time.Now().Add(d)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", d, err))
|
||||
}
|
||||
if s.ReadTimeout > 0 || s.MaxKeepaliveDuration > 0 {
|
||||
lastReadDeadlineTime = s.updateReadDeadline(c, ctx, lastReadDeadlineTime)
|
||||
if lastReadDeadlineTime.IsZero() {
|
||||
err = ErrKeepaliveTimeout
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !s.ReduceMemoryUsage || br != nil {
|
||||
if !(s.ReduceMemoryUsage || ctx.lastReadDuration > time.Second) || br != nil {
|
||||
if br == nil {
|
||||
br = acquireReader(ctx)
|
||||
}
|
||||
|
||||
// If this is a keep-alive connection we want to try and read the first bytes
|
||||
// within the idle time.
|
||||
if connRequestNum > 1 {
|
||||
var b []byte
|
||||
b, err = br.Peek(4)
|
||||
if len(b) == 0 {
|
||||
// If reading from a keep-alive connection returns nothing it means
|
||||
// the connection was closed (either timeout or from the other side).
|
||||
if err != io.EOF {
|
||||
err = errNothingRead{err}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If this is a keep-alive connection acquireByteReader will try to peek
|
||||
// a couple of bytes already so the idle timeout will already be used.
|
||||
br, err = acquireByteReader(&ctx)
|
||||
}
|
||||
|
||||
ctx.Request.isTLS = isTLS
|
||||
ctx.Response.Header.noDefaultContentType = s.NoDefaultContentType
|
||||
ctx.Response.Header.noDefaultDate = s.NoDefaultDate
|
||||
|
||||
if err == nil {
|
||||
if s.ReadTimeout > 0 {
|
||||
if err := c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", s.ReadTimeout, err))
|
||||
}
|
||||
}
|
||||
if s.DisableHeaderNamesNormalizing {
|
||||
ctx.Request.Header.DisableNormalizing()
|
||||
ctx.Response.Header.DisableNormalizing()
|
||||
}
|
||||
// reading Headers
|
||||
if err = ctx.Request.Header.Read(br); err == nil {
|
||||
if onHdrRecv := s.HeaderReceived; onHdrRecv != nil {
|
||||
reqConf := onHdrRecv(&ctx.Request.Header)
|
||||
if reqConf.ReadTimeout > 0 {
|
||||
deadline := time.Now().Add(reqConf.ReadTimeout)
|
||||
if err := c.SetReadDeadline(deadline); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", deadline, err))
|
||||
}
|
||||
}
|
||||
if reqConf.MaxRequestBodySize > 0 {
|
||||
maxRequestBodySize = reqConf.MaxRequestBodySize
|
||||
}
|
||||
if reqConf.WriteTimeout > 0 {
|
||||
writeTimeout = reqConf.WriteTimeout
|
||||
}
|
||||
}
|
||||
//read body
|
||||
err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly, !s.DisablePreParseMultipartForm)
|
||||
}
|
||||
// reading Headers and Body
|
||||
err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly)
|
||||
if err == nil {
|
||||
// If we read any bytes off the wire, we're active.
|
||||
s.setState(c, StateActive)
|
||||
}
|
||||
|
||||
if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil {
|
||||
releaseReader(s, br)
|
||||
br = nil
|
||||
}
|
||||
}
|
||||
|
||||
currentTime = time.Now()
|
||||
ctx.lastReadDuration = currentTime.Sub(ctx.time)
|
||||
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
} else if nr, ok := err.(errNothingRead); ok {
|
||||
if connRequestNum > 1 {
|
||||
// This is not the first request and we haven't read a single byte
|
||||
// of a new request yet. This means it's just a keep-alive connection
|
||||
// closing down either because the remote closed it or because
|
||||
// or a read timeout on our side. Either way just close the connection
|
||||
// and don't return any error response.
|
||||
err = nil
|
||||
} else {
|
||||
err = nr.error
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
} else if connRequestNum > 1 && err == errNothingRead {
|
||||
// This is not the first request and we haven't read a single byte
|
||||
// of a new request yet. This means it's just a keep-alive connection
|
||||
// closing down either because the remote closed it or because
|
||||
// or a read timeout on our side. Either way just close the connection
|
||||
// and don't return any error response.
|
||||
err = nil
|
||||
} else {
|
||||
bw = s.writeErrorResponse(bw, ctx, serverName, err)
|
||||
}
|
||||
break
|
||||
|
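The serveConn loop rewritten above is where the server's timeouts are actually applied per request: the newer code combines ReadTimeout with IdleTimeout, while the downgraded code combines ReadTimeout with MaxKeepaliveDuration via updateReadDeadline. A hedged configuration sketch using only fields shown in both versions; the address and limits are arbitrary.

package main

import (
	"log"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	s := &fasthttp.Server{
		Handler: func(ctx *fasthttp.RequestCtx) {
			ctx.SetBodyString("ok")
		},
		// ReadTimeout bounds reading of a full request; in the downgraded
		// code it also caps how long an idle keep-alive connection may wait.
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
		// Close a keep-alive connection after it has served this many requests.
		MaxRequestsPerConn: 1000,
	}
	log.Fatal(s.ListenAndServe(":8080"))
}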
@ -2042,15 +1890,12 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
|
||||
// 'Expect: 100-continue' request handling.
|
||||
// See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details.
|
||||
if ctx.Request.MayContinue() {
|
||||
if !ctx.Request.Header.ignoreBody() && ctx.Request.MayContinue() {
|
||||
// Send 'HTTP/1.1 100 Continue' response.
|
||||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
}
|
||||
_, err = bw.Write(strResponseContinue)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
bw.Write(strResponseContinue)
|
||||
err = bw.Flush()
|
||||
if err != nil {
|
||||
break
|
||||
|
@ -2064,7 +1909,7 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
if br == nil {
|
||||
br = acquireReader(ctx)
|
||||
}
|
||||
err = ctx.Request.ContinueReadBody(br, maxRequestBodySize, !s.DisablePreParseMultipartForm)
|
||||
err = ctx.Request.ContinueReadBody(br, maxRequestBodySize)
|
||||
if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil {
|
||||
releaseReader(s, br)
|
||||
br = nil
|
||||
|
@ -2083,7 +1928,7 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
}
|
||||
ctx.connID = connID
|
||||
ctx.connRequestNum = connRequestNum
|
||||
ctx.time = time.Now()
|
||||
ctx.time = currentTime
|
||||
s.Handler(ctx)
|
||||
|
||||
timeoutResponse = ctx.timeoutResponse
|
||||
|
@ -2099,13 +1944,10 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
if !ctx.IsGet() && ctx.IsHead() {
|
||||
ctx.Response.SkipBody = true
|
||||
}
|
||||
reqReset = true
|
||||
ctx.Request.Reset()
|
||||
|
||||
hijackHandler = ctx.hijackHandler
|
||||
ctx.hijackHandler = nil
|
||||
hijackNoResponse = ctx.hijackNoResponse && hijackHandler != nil
|
||||
ctx.hijackNoResponse = false
|
||||
|
||||
ctx.userValues.Reset()
|
||||
|
||||
|
@ -2113,10 +1955,8 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
ctx.SetConnectionClose()
|
||||
}
|
||||
|
||||
if writeTimeout > 0 {
|
||||
if err := c.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", s.WriteTimeout, err))
|
||||
}
|
||||
if s.WriteTimeout > 0 || s.MaxKeepaliveDuration > 0 {
|
||||
lastWriteDeadlineTime = s.updateWriteDeadline(c, ctx, lastWriteDeadlineTime)
|
||||
}
|
||||
|
||||
connectionClose = connectionClose || ctx.Response.ConnectionClose()
|
||||
|
@ -2133,32 +1973,30 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
ctx.Response.Header.SetServerBytes(serverName)
|
||||
}
|
||||
|
||||
if !hijackNoResponse {
|
||||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
}
|
||||
if err = writeResponse(ctx, bw); err != nil {
|
||||
break
|
||||
}
|
||||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
}
|
||||
if err = writeResponse(ctx, bw); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Only flush the writer if we don't have another request in the pipeline.
|
||||
// This is a big of an ugly optimization for https://www.techempower.com/benchmarks/
|
||||
// This benchmark will send 16 pipelined requests. It is faster to pack as many responses
|
||||
// in a TCP packet and send it back at once than waiting for a flush every request.
|
||||
// In real world circumstances this behaviour could be argued as being wrong.
|
||||
if br == nil || br.Buffered() == 0 || connectionClose {
|
||||
err = bw.Flush()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if connectionClose {
|
||||
// Only flush the writer if we don't have another request in the pipeline.
|
||||
// This is a big of an ugly optimization for https://www.techempower.com/benchmarks/
|
||||
// This benchmark will send 16 pipelined requests. It is faster to pack as many responses
|
||||
// in a TCP packet and send it back at once than waiting for a flush every request.
|
||||
// In real world circumstances this behaviour could be argued as being wrong.
|
||||
if br == nil || br.Buffered() == 0 || connectionClose {
|
||||
err = bw.Flush()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if s.ReduceMemoryUsage && hijackHandler == nil {
|
||||
releaseWriter(s, bw)
|
||||
bw = nil
|
||||
}
|
||||
}
|
||||
if connectionClose {
|
||||
break
|
||||
}
|
||||
if s.ReduceMemoryUsage {
|
||||
releaseWriter(s, bw)
|
||||
bw = nil
|
||||
}
|
||||
|
||||
if hijackHandler != nil {
|
||||
|
@ -2167,8 +2005,8 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
hjr = br
|
||||
br = nil
|
||||
|
||||
// br may point to ctx.fbr, so do not return ctx into pool below.
|
||||
ctx = nil
|
||||
// br may point to ctx.fbr, so do not return ctx into pool.
|
||||
ctx = s.acquireCtx(c)
|
||||
}
|
||||
if bw != nil {
|
||||
err = bw.Flush()
|
||||
|
@ -2178,19 +2016,15 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
releaseWriter(s, bw)
|
||||
bw = nil
|
||||
}
|
||||
err = c.SetReadDeadline(zeroTime)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = c.SetWriteDeadline(zeroTime)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
c.SetReadDeadline(zeroTime)
|
||||
c.SetWriteDeadline(zeroTime)
|
||||
go hijackConnHandler(hjr, c, s, hijackHandler)
|
||||
hijackHandler = nil
|
||||
err = errHijacked
|
||||
break
|
||||
}
|
||||
|
||||
currentTime = time.Now()
|
||||
s.setState(c, StateIdle)
|
||||
|
||||
if atomic.LoadInt32(&s.stop) == 1 {
|
||||
|
@ -2205,16 +2039,8 @@ func (s *Server) serveConn(c net.Conn) (err error) {
|
|||
if bw != nil {
|
||||
releaseWriter(s, bw)
|
||||
}
|
||||
if ctx != nil {
|
||||
// in unexpected cases the for loop will break
|
||||
// before request reset call. in such cases, call it before
|
||||
// release to fix #548
|
||||
if !reqReset {
|
||||
ctx.Request.Reset()
|
||||
}
|
||||
s.releaseCtx(ctx)
|
||||
}
|
||||
return
|
||||
s.releaseCtx(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) setState(nc net.Conn, state ConnState) {
|
||||
|
@ -2223,6 +2049,59 @@ func (s *Server) setState(nc net.Conn, state ConnState) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *Server) updateReadDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
|
||||
readTimeout := s.ReadTimeout
|
||||
currentTime := ctx.time
|
||||
if s.MaxKeepaliveDuration > 0 {
|
||||
connTimeout := s.MaxKeepaliveDuration - currentTime.Sub(ctx.connTime)
|
||||
if connTimeout <= 0 {
|
||||
return zeroTime
|
||||
}
|
||||
if connTimeout < readTimeout {
|
||||
readTimeout = connTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// Optimization: update read deadline only if more than 25%
|
||||
// of the last read deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
if currentTime.Sub(lastDeadlineTime) > (readTimeout >> 2) {
|
||||
if err := c.SetReadDeadline(currentTime.Add(readTimeout)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", readTimeout, err))
|
||||
}
|
||||
lastDeadlineTime = currentTime
|
||||
}
|
||||
return lastDeadlineTime
|
||||
}
|
||||
|
||||
func (s *Server) updateWriteDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
|
||||
writeTimeout := s.WriteTimeout
|
||||
if s.MaxKeepaliveDuration > 0 {
|
||||
connTimeout := s.MaxKeepaliveDuration - time.Since(ctx.connTime)
|
||||
if connTimeout <= 0 {
|
||||
// MaxKeepAliveDuration exceeded, but let's try sending response anyway
|
||||
// in 100ms with 'Connection: close' header.
|
||||
ctx.SetConnectionClose()
|
||||
connTimeout = 100 * time.Millisecond
|
||||
}
|
||||
if connTimeout < writeTimeout {
|
||||
writeTimeout = connTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// Optimization: update write deadline only if more than 25%
|
||||
// of the last write deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(lastDeadlineTime) > (writeTimeout >> 2) {
|
||||
if err := c.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", writeTimeout, err))
|
||||
}
|
||||
lastDeadlineTime = currentTime
|
||||
}
|
||||
return lastDeadlineTime
|
||||
}
|
||||
|
||||
func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) {
|
||||
hjc := s.acquireHijackConn(r, c)
|
||||
h(hjc)
|
||||
|
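updateReadDeadline and updateWriteDeadline, restored by the downgrade, call SetReadDeadline/SetWriteDeadline only when more than a quarter of the previous timeout has elapsed, because refreshing a deadline on every request is relatively expensive (see the referenced golang/go#15133). A standalone sketch of that throttling idea, independent of fasthttp; names are illustrative.

package main

import (
	"fmt"
	"net"
	"time"
)

// maybeExtendReadDeadline resets the read deadline on c only when more than
// 25% of timeout has elapsed since the deadline was last refreshed, mirroring
// the optimization in updateReadDeadline. It returns the time of the last
// (possibly unchanged) refresh.
func maybeExtendReadDeadline(c net.Conn, timeout time.Duration, lastSet time.Time) time.Time {
	now := time.Now()
	if now.Sub(lastSet) <= timeout/4 {
		return lastSet // recent enough, skip the deadline syscall
	}
	if err := c.SetReadDeadline(now.Add(timeout)); err != nil {
		// fasthttp treats this as a BUG and panics; here we just report it.
		fmt.Println("SetReadDeadline:", err)
		return lastSet
	}
	return now
}

func main() {
	// An in-memory pipe just to have a net.Conn to demonstrate with.
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	var lastSet time.Time
	for i := 0; i < 3; i++ {
		lastSet = maybeExtendReadDeadline(server, 10*time.Second, lastSet)
		fmt.Println("deadline last refreshed:", lastSet.Format(time.RFC3339Nano))
		time.Sleep(10 * time.Millisecond)
	}
}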
@ -2230,10 +2109,8 @@ func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) {
|
|||
if br, ok := r.(*bufio.Reader); ok {
|
||||
releaseReader(s, br)
|
||||
}
|
||||
if !s.KeepHijackedConns {
|
||||
c.Close()
|
||||
s.releaseHijackConn(hjc)
|
||||
}
|
||||
c.Close()
|
||||
s.releaseHijackConn(hjc)
|
||||
}
|
||||
|
||||
func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn {
|
||||
|
@ -2242,7 +2119,6 @@ func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn {
|
|||
hjc := &hijackConn{
|
||||
Conn: c,
|
||||
r: r,
|
||||
s: s,
|
||||
}
|
||||
return hjc
|
||||
}
|
||||
|
@ -2261,27 +2137,15 @@ func (s *Server) releaseHijackConn(hjc *hijackConn) {
|
|||
type hijackConn struct {
|
||||
net.Conn
|
||||
r io.Reader
|
||||
s *Server
|
||||
}
|
||||
|
||||
func (c *hijackConn) UnsafeConn() net.Conn {
|
||||
return c.Conn
|
||||
}
|
||||
|
||||
func (c *hijackConn) Read(p []byte) (int, error) {
|
||||
func (c hijackConn) Read(p []byte) (int, error) {
|
||||
return c.r.Read(p)
|
||||
}
|
||||
|
||||
func (c *hijackConn) Close() error {
|
||||
if !c.s.KeepHijackedConns {
|
||||
// when we do not keep hijacked connections,
|
||||
// it is closed in hijackConnHandler.
|
||||
return nil
|
||||
}
|
||||
|
||||
conn := c.Conn
|
||||
c.s.releaseHijackConn(c)
|
||||
return conn.Close()
|
||||
func (c hijackConn) Close() error {
|
||||
// hijacked conn is closed in hijackConnHandler.
|
||||
return nil
|
||||
}
|
||||
|
||||
// LastTimeoutErrorResponse returns the last timeout response set
|
||||
|
@ -2310,6 +2174,7 @@ func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) {
|
|||
ctx := *ctxP
|
||||
s := ctx.s
|
||||
c := ctx.c
|
||||
t := ctx.time
|
||||
s.releaseCtx(ctx)
|
||||
|
||||
// Make GC happy, so it could garbage collect ctx
|
||||
|
@ -2317,10 +2182,16 @@ func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) {
|
|||
ctx = nil
|
||||
*ctxP = nil
|
||||
|
||||
var b [1]byte
|
||||
n, err := c.Read(b[:])
|
||||
|
||||
v := s.bytePool.Get()
|
||||
if v == nil {
|
||||
v = make([]byte, 1)
|
||||
}
|
||||
b := v.([]byte)
|
||||
n, err := c.Read(b)
|
||||
ch := b[0]
|
||||
s.bytePool.Put(v)
|
||||
ctx = s.acquireCtx(c)
|
||||
ctx.time = t
|
||||
*ctxP = ctx
|
||||
if err != nil {
|
||||
// Treat all errors as EOF on unsuccessful read
|
||||
|
@ -2332,7 +2203,7 @@ func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) {
|
|||
}
|
||||
|
||||
ctx.fbr.c = c
|
||||
ctx.fbr.ch = b[0]
|
||||
ctx.fbr.ch = ch
|
||||
ctx.fbr.byteRead = false
|
||||
r := acquireReader(ctx)
|
||||
r.Reset(&ctx.fbr)
|
||||
|
@ -2404,6 +2275,7 @@ func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage boo
|
|||
ctx.s = fakeServer
|
||||
ctx.connRequestNum = 0
|
||||
ctx.connTime = time.Now()
|
||||
ctx.time = ctx.connTime
|
||||
|
||||
keepBodyBuffer := !reduceMemoryUsage
|
||||
ctx.Request.keepBodyBuffer = keepBodyBuffer
|
||||
|
@ -2531,34 +2403,26 @@ func (s *Server) getServerName() []byte {
|
|||
}
|
||||
|
||||
func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) {
|
||||
w.Write(statusLine(statusCode)) //nolint:errcheck
|
||||
w.Write(statusLine(statusCode))
|
||||
|
||||
server := ""
|
||||
if !s.NoDefaultServerHeader {
|
||||
server = fmt.Sprintf("Server: %s\r\n", s.getServerName())
|
||||
}
|
||||
|
||||
date := ""
|
||||
if !s.NoDefaultDate {
|
||||
serverDateOnce.Do(updateServerDate)
|
||||
date = fmt.Sprintf("Date: %s\r\n", serverDate.Load())
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "Connection: close\r\n"+
|
||||
server+
|
||||
date+
|
||||
"Date: %s\r\n"+
|
||||
"Content-Type: text/plain\r\n"+
|
||||
"Content-Length: %d\r\n"+
|
||||
"\r\n"+
|
||||
"%s",
|
||||
len(msg), msg)
|
||||
serverDate.Load(), len(msg), msg)
|
||||
}
|
||||
|
||||
func defaultErrorHandler(ctx *RequestCtx, err error) {
|
||||
if _, ok := err.(*ErrSmallBuffer); ok {
|
||||
ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge)
|
||||
} else if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() {
|
||||
ctx.Error("Request timeout", StatusRequestTimeout)
|
||||
} else {
|
||||
ctx.Error("Error when parsing request", StatusBadRequest)
|
||||
}
|
||||
|
@ -2579,7 +2443,7 @@ func (s *Server) writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverNam
|
|||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
}
|
||||
writeResponse(ctx, bw) //nolint:errcheck
|
||||
writeResponse(ctx, bw)
|
||||
bw.Flush()
|
||||
return bw
|
||||
}
|
||||
|
|
5
vendor/github.com/valyala/fasthttp/stackless/writer.go
generated
vendored
|
@@ -75,7 +75,7 @@ func (w *writer) Close() error {
 
 func (w *writer) Reset(dstW io.Writer) {
 	w.xw.Reset()
-	w.do(opReset) //nolint:errcheck
+	w.do(opReset)
 	w.dstW = dstW
 }
 
@@ -125,7 +125,8 @@ func (w *xWriter) Write(p []byte) (int, error) {
 	if w.bb == nil {
 		w.bb = bufferPool.Get()
 	}
-	return w.bb.Write(p)
+	w.bb.Write(p)
+	return len(p), nil
 }
 
 func (w *xWriter) Reset() {
|
|
63
vendor/github.com/valyala/fasthttp/strings.go
generated
vendored
|
@ -16,45 +16,42 @@ var (
|
|||
strHTTP = []byte("http")
|
||||
strHTTPS = []byte("https")
|
||||
strHTTP11 = []byte("HTTP/1.1")
|
||||
strColon = []byte(":")
|
||||
strColonSlashSlash = []byte("://")
|
||||
strColonSpace = []byte(": ")
|
||||
strGMT = []byte("GMT")
|
||||
strAt = []byte("@")
|
||||
|
||||
strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n")
|
||||
|
||||
strGet = []byte(MethodGet)
|
||||
strHead = []byte(MethodHead)
|
||||
strPost = []byte(MethodPost)
|
||||
strPut = []byte(MethodPut)
|
||||
strDelete = []byte(MethodDelete)
|
||||
strConnect = []byte(MethodConnect)
|
||||
strOptions = []byte(MethodOptions)
|
||||
strTrace = []byte(MethodTrace)
|
||||
strPatch = []byte(MethodPatch)
|
||||
strGet = []byte("GET")
|
||||
strHead = []byte("HEAD")
|
||||
strPost = []byte("POST")
|
||||
strPut = []byte("PUT")
|
||||
strDelete = []byte("DELETE")
|
||||
strConnect = []byte("CONNECT")
|
||||
strOptions = []byte("OPTIONS")
|
||||
strTrace = []byte("TRACE")
|
||||
strPatch = []byte("PATCH")
|
||||
|
||||
strExpect = []byte(HeaderExpect)
|
||||
strConnection = []byte(HeaderConnection)
|
||||
strContentLength = []byte(HeaderContentLength)
|
||||
strContentType = []byte(HeaderContentType)
|
||||
strDate = []byte(HeaderDate)
|
||||
strHost = []byte(HeaderHost)
|
||||
strReferer = []byte(HeaderReferer)
|
||||
strServer = []byte(HeaderServer)
|
||||
strTransferEncoding = []byte(HeaderTransferEncoding)
|
||||
strContentEncoding = []byte(HeaderContentEncoding)
|
||||
strAcceptEncoding = []byte(HeaderAcceptEncoding)
|
||||
strUserAgent = []byte(HeaderUserAgent)
|
||||
strCookie = []byte(HeaderCookie)
|
||||
strSetCookie = []byte(HeaderSetCookie)
|
||||
strLocation = []byte(HeaderLocation)
|
||||
strIfModifiedSince = []byte(HeaderIfModifiedSince)
|
||||
strLastModified = []byte(HeaderLastModified)
|
||||
strAcceptRanges = []byte(HeaderAcceptRanges)
|
||||
strRange = []byte(HeaderRange)
|
||||
strContentRange = []byte(HeaderContentRange)
|
||||
strAuthorization = []byte(HeaderAuthorization)
|
||||
strExpect = []byte("Expect")
|
||||
strConnection = []byte("Connection")
|
||||
strContentLength = []byte("Content-Length")
|
||||
strContentType = []byte("Content-Type")
|
||||
strDate = []byte("Date")
|
||||
strHost = []byte("Host")
|
||||
strReferer = []byte("Referer")
|
||||
strServer = []byte("Server")
|
||||
strTransferEncoding = []byte("Transfer-Encoding")
|
||||
strContentEncoding = []byte("Content-Encoding")
|
||||
strAcceptEncoding = []byte("Accept-Encoding")
|
||||
strUserAgent = []byte("User-Agent")
|
||||
strCookie = []byte("Cookie")
|
||||
strSetCookie = []byte("Set-Cookie")
|
||||
strLocation = []byte("Location")
|
||||
strIfModifiedSince = []byte("If-Modified-Since")
|
||||
strLastModified = []byte("Last-Modified")
|
||||
strAcceptRanges = []byte("Accept-Ranges")
|
||||
strRange = []byte("Range")
|
||||
strContentRange = []byte("Content-Range")
|
||||
|
||||
strCookieExpires = []byte("expires")
|
||||
strCookieDomain = []byte("domain")
|
||||
|
@ -65,7 +62,6 @@ var (
|
|||
strCookieSameSite = []byte("SameSite")
|
||||
strCookieSameSiteLax = []byte("Lax")
|
||||
strCookieSameSiteStrict = []byte("Strict")
|
||||
strCookieSameSiteNone = []byte("None")
|
||||
|
||||
strClose = []byte("close")
|
||||
strGzip = []byte("gzip")
|
||||
|
@ -81,5 +77,4 @@ var (
|
|||
strBytes = []byte("bytes")
|
||||
strTextSlash = []byte("text/")
|
||||
strApplicationSlash = []byte("application/")
|
||||
strBasicSpace = []byte("Basic ")
|
||||
)
|
||||
|
|
50
vendor/github.com/valyala/fasthttp/tcpdialer.go
generated
vendored
|
@@ -1,7 +1,6 @@
package fasthttp

import (
"context"
"errors"
"net"
"strconv"

@@ -120,11 +119,6 @@ var (
defaultDialer = &TCPDialer{Concurrency: 1000}
)

// Resolver represents interface of the tcp resolver.
type Resolver interface {
LookupIPAddr(context.Context, string) (names []net.IPAddr, err error)
}

// TCPDialer contains options to control a group of Dial calls.
type TCPDialer struct {
// Concurrency controls the maximum number of concurrent Dails

@@ -135,24 +129,6 @@ type TCPDialer struct {
// Changes made after the first Dial will not affect anything.
Concurrency int

// LocalAddr is the local address to use when dialing an
// address.
// If nil, a local address is automatically chosen.
LocalAddr *net.TCPAddr

// This may be used to override DNS resolving policy, like this:
// var dialer = &fasthttp.TCPDialer{
// Resolver: &net.Resolver{
// PreferGo: true,
// StrictErrors: false,
// Dial: func (ctx context.Context, network, address string) (net.Conn, error) {
// d := net.Dialer{}
// return d.DialContext(ctx, "udp", "8.8.8.8:53")
// },
// },
// }
Resolver Resolver

tcpAddrsLock sync.Mutex
tcpAddrsMap map[string]*tcpAddrEntry
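The Resolver interface and the LocalAddr/Resolver fields deleted above are what let callers plug a custom DNS resolver into the newer fasthttp. A hedged usage sketch of that v1.12-style API (removed from the vendored copy by this commit; the public Dial/DialTimeout methods on TCPDialer are assumed from the newer release and are not shown in this diff):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	// Send DNS queries to a fixed server instead of the system resolver.
	dialer := &fasthttp.TCPDialer{
		Concurrency: 1000,
		Resolver: &net.Resolver{
			PreferGo:     true,
			StrictErrors: false,
			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
				d := net.Dialer{}
				return d.DialContext(ctx, "udp", "8.8.8.8:53")
			},
		},
	}

	conn, err := dialer.DialTimeout("example.com:80", 5*time.Second)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```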
@@ -289,7 +265,7 @@ func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (ne
n := uint32(len(addrs))
deadline := time.Now().Add(timeout)
for n > 0 {
conn, err = d.tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh)
conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh)
if err == nil {
return conn, nil
}

@@ -302,7 +278,7 @@ func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (ne
return nil, err
}

func (d *TCPDialer) tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) {
func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) {
timeout := -time.Since(deadline)
if timeout <= 0 {
return nil, ErrDialTimeout

@@ -333,7 +309,7 @@ func (d *TCPDialer) tryDial(network string, addr *net.TCPAddr, deadline time.Tim
ch := chv.(chan dialResult)
go func() {
var dr dialResult
dr.conn, dr.err = net.DialTCP(network, d.LocalAddr, addr)
dr.conn, dr.err = net.DialTCP(network, nil, addr)
ch <- dr
if concurrencyCh != nil {
<-concurrencyCh

@@ -411,7 +387,7 @@ func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uin
d.tcpAddrsLock.Unlock()

if e == nil {
addrs, err := resolveTCPAddrs(addr, dualStack, d.Resolver)
addrs, err := resolveTCPAddrs(addr, dualStack)
if err != nil {
d.tcpAddrsLock.Lock()
e = d.tcpAddrsMap[addr]

@@ -436,7 +412,7 @@ func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uin
return e.addrs, idx, nil
}

func resolveTCPAddrs(addr string, dualStack bool, resolver Resolver) ([]net.TCPAddr, error) {
func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) {
host, portS, err := net.SplitHostPort(addr)
if err != nil {
return nil, err

@@ -446,27 +422,21 @@ func resolveTCPAddrs(addr string, dualStack bool, resolver Resolver) ([]net.TCPA
return nil, err
}

if resolver == nil {
resolver = net.DefaultResolver
}

ctx := context.Background()
ipaddrs, err := resolver.LookupIPAddr(ctx, host)
ips, err := net.LookupIP(host)
if err != nil {
return nil, err
}

n := len(ipaddrs)
n := len(ips)
addrs := make([]net.TCPAddr, 0, n)
for i := 0; i < n; i++ {
ip := ipaddrs[i]
if !dualStack && ip.IP.To4() == nil {
ip := ips[i]
if !dualStack && ip.To4() == nil {
continue
}
addrs = append(addrs, net.TCPAddr{
IP: ip.IP,
IP: ip,
Port: port,
Zone: ip.Zone,
})
}
if len(addrs) == 0 {
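After the downgrade, name resolution goes through net.LookupIP rather than the pluggable Resolver with LookupIPAddr. A simplified, self-contained sketch of that older resolution path (not the vendored code itself; the error text and the main function are illustrative):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// resolveTCPAddrs mirrors the downgraded logic: resolve host:port into TCP
// addresses via net.LookupIP, optionally filtering out IPv6 addresses.
func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) {
	host, portS, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portS)
	if err != nil {
		return nil, err
	}
	ips, err := net.LookupIP(host)
	if err != nil {
		return nil, err
	}
	addrs := make([]net.TCPAddr, 0, len(ips))
	for _, ip := range ips {
		if !dualStack && ip.To4() == nil {
			continue // skip IPv6 addresses when dual-stack dialing is disabled
		}
		addrs = append(addrs, net.TCPAddr{IP: ip, Port: port})
	}
	if len(addrs) == 0 {
		return nil, fmt.Errorf("no suitable addresses for %q", addr)
	}
	return addrs, nil
}

func main() {
	addrs, err := resolveTCPAddrs("example.com:80", true)
	fmt.Println(addrs, err)
}
```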
vendor/github.com/valyala/fasthttp/uri.go (generated, vendored): 110 lines changed
@@ -36,7 +36,7 @@ var uriPool = &sync.Pool{
//
// URI instance MUST NOT be used from concurrently running goroutines.
type URI struct {
noCopy noCopy //nolint:unused,structcheck
noCopy noCopy

pathOriginal []byte
scheme []byte

@@ -48,20 +48,10 @@ type URI struct {
queryArgs Args
parsedQueryArgs bool

// Path values are sent as-is without normalization
//
// Disabled path normalization may be useful for proxying incoming requests
// to servers that are expecting paths to be forwarded as-is.
//
// By default path values are normalized, i.e.
// extra slashes are removed, special characters are encoded.
DisablePathNormalizing bool

fullURI []byte
requestURI []byte

username []byte
password []byte
h *RequestHeader
}

// CopyTo copies uri contents to dst.

@@ -73,15 +63,13 @@ func (u *URI) CopyTo(dst *URI) {
dst.queryString = append(dst.queryString[:0], u.queryString...)
dst.hash = append(dst.hash[:0], u.hash...)
dst.host = append(dst.host[:0], u.host...)
dst.username = append(dst.username[:0], u.username...)
dst.password = append(dst.password[:0], u.password...)

u.queryArgs.CopyTo(&dst.queryArgs)
dst.parsedQueryArgs = u.parsedQueryArgs
dst.DisablePathNormalizing = u.DisablePathNormalizing

// fullURI and requestURI shouldn't be copied, since they are created
// from scratch on each FullURI() and RequestURI() call.
dst.h = u.h
}

// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe .
|
@@ -101,36 +89,6 @@ func (u *URI) SetHashBytes(hash []byte) {
u.hash = append(u.hash[:0], hash...)
}

// Username returns URI username
func (u *URI) Username() []byte {
return u.username
}

// SetUsername sets URI username.
func (u *URI) SetUsername(username string) {
u.username = append(u.username[:0], username...)
}

// SetUsernameBytes sets URI username.
func (u *URI) SetUsernameBytes(username []byte) {
u.username = append(u.username[:0], username...)
}

// Password returns URI password
func (u *URI) Password() []byte {
return u.password
}

// SetPassword sets URI password.
func (u *URI) SetPassword(password string) {
u.password = append(u.password[:0], password...)
}

// SetPasswordBytes sets URI password.
func (u *URI) SetPasswordBytes(password []byte) {
u.password = append(u.password[:0], password...)
}

// QueryString returns URI query string,
// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe .
//
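The userinfo accessors removed in this hunk exist only in the newer fasthttp. A tiny usage sketch, assuming the v1.12-style URI API that this downgrade drops:

```go
package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	var u fasthttp.URI
	// SetUsername/SetPassword store the userinfo part of the URI;
	// after the downgrade to v1.2.0 these methods are not available.
	u.SetUsername("alice")
	u.SetPassword("s3cret")
	fmt.Printf("user=%s pass=%s\n", u.Username(), u.Password())
}
```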
|
@@ -216,25 +174,29 @@ func (u *URI) Reset() {
u.path = u.path[:0]
u.queryString = u.queryString[:0]
u.hash = u.hash[:0]
u.username = u.username[:0]
u.password = u.password[:0]

u.host = u.host[:0]
u.queryArgs.Reset()
u.parsedQueryArgs = false
u.DisablePathNormalizing = false

// There is no need in u.fullURI = u.fullURI[:0], since full uri
// is calculated on each call to FullURI().

// There is no need in u.requestURI = u.requestURI[:0], since requestURI
// is calculated on each call to RequestURI().

u.h = nil
}

// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe .
//
// Host is always lowercased.
func (u *URI) Host() []byte {
if len(u.host) == 0 && u.h != nil {
u.host = append(u.host[:0], u.h.Host()...)
lowercaseBytes(u.host)
u.h = nil
}
return u.host
}

@@ -257,37 +219,23 @@ func (u *URI) SetHostBytes(host []byte) {
//
// uri may contain e.g. RequestURI without scheme and host if host is non-empty.
func (u *URI) Parse(host, uri []byte) {
u.parse(host, uri, false)
u.parse(host, uri, nil)
}

func (u *URI) parse(host, uri []byte, isTLS bool) {
u.Reset()

if len(host) == 0 || bytes.Contains(uri, strColonSlashSlash) {
scheme, newHost, newURI := splitHostURI(host, uri)
u.scheme = append(u.scheme, scheme...)
lowercaseBytes(u.scheme)
host = newHost
uri = newURI
}

func (u *URI) parseQuick(uri []byte, h *RequestHeader, isTLS bool) {
u.parse(nil, uri, h)
if isTLS {
u.scheme = append(u.scheme[:0], strHTTPS...)
}
}

if n := bytes.Index(host, strAt); n >= 0 {
auth := host[:n]
host = host[n+1:]

if n := bytes.Index(auth, strColon); n >= 0 {
u.username = auth[:n]
u.password = auth[n+1:]
} else {
u.username = auth
u.password = auth[:0] // Make sure it's not nil
}
}
func (u *URI) parse(host, uri []byte, h *RequestHeader) {
u.Reset()
u.h = h

scheme, host, uri := splitHostURI(host, uri)
u.scheme = append(u.scheme, scheme...)
lowercaseBytes(u.scheme)
u.host = append(u.host, host...)
lowercaseBytes(u.host)

@@ -388,12 +336,7 @@ func normalizePath(dst, src []byte) []byte {

// RequestURI returns RequestURI - i.e. URI without Scheme and Host.
func (u *URI) RequestURI() []byte {
var dst []byte
if u.DisablePathNormalizing {
dst = append(u.requestURI[:0], u.PathOriginal()...)
} else {
dst = appendQuotedPath(u.requestURI[:0], u.Path())
}
dst := appendQuotedPath(u.requestURI[:0], u.Path())
if u.queryArgs.Len() > 0 {
dst = append(dst, '?')
dst = u.queryArgs.AppendBytes(dst)

@@ -401,6 +344,10 @@ func (u *URI) RequestURI() []byte {
dst = append(dst, '?')
dst = append(dst, u.queryString...)
}
if len(u.hash) > 0 {
dst = append(dst, '#')
dst = append(dst, u.hash...)
}
u.requestURI = dst
return u.requestURI
}

@@ -515,12 +462,7 @@ func (u *URI) FullURI() []byte {
// AppendBytes appends full uri to dst and returns the extended dst.
func (u *URI) AppendBytes(dst []byte) []byte {
dst = u.appendSchemeHost(dst)
dst = append(dst, u.RequestURI()...)
if len(u.hash) > 0 {
dst = append(dst, '#')
dst = append(dst, u.hash...)
}
return dst
return append(dst, u.RequestURI()...)
}
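The uri.go hunks above touch parsing (userinfo handling), RequestURI building, and path normalization. A hedged end-to-end sketch of how those pieces are used from calling code, assuming the newer v1.12-style API (Username/Password and DisablePathNormalizing no longer exist after the downgrade):

```go
package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	var u fasthttp.URI
	// Parse takes an optional host plus the URI; with an absolute URI the
	// host argument may be nil.
	u.Parse(nil, []byte("http://alice:s3cret@example.com/foo//bar/../baz?x=1#frag"))

	fmt.Printf("scheme=%s host=%s\n", u.Scheme(), u.Host())
	fmt.Printf("user=%s pass=%s\n", u.Username(), u.Password()) // v1.12-style only
	fmt.Printf("query=%s hash=%s\n", u.QueryString(), u.Hash())

	// By default the path is normalized before RequestURI is built.
	fmt.Printf("normalized: %s\n", u.RequestURI())

	// v1.12-style only: forward the original path bytes untouched,
	// e.g. when proxying incoming requests as-is.
	u.DisablePathNormalizing = true
	fmt.Printf("as-is: %s\n", u.RequestURI())
}
```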
|
||||
func (u *URI) appendSchemeHost(dst []byte) []byte {
vendor/github.com/valyala/fasthttp/workerpool.go (generated, vendored): 54 lines changed
@@ -50,11 +50,6 @@ func (wp *workerPool) Start() {
}
wp.stopCh = make(chan struct{})
stopCh := wp.stopCh
wp.workerChanPool.New = func() interface{} {
return &workerChan{
ch: make(chan net.Conn, workerChanCap),
}
}
go func() {
var scratch []*workerChan
for {

@@ -81,8 +76,8 @@ func (wp *workerPool) Stop() {
// serving the connection and noticing wp.mustStop = true.
wp.lock.Lock()
ready := wp.ready
for i := range ready {
ready[i].ch <- nil
for i, ch := range ready {
ch.ch <- nil
ready[i] = nil
}
wp.ready = ready[:0]

@@ -102,34 +97,23 @@ func (wp *workerPool) clean(scratch *[]*workerChan) {
// Clean least recently used workers if they didn't serve connections
// for more than maxIdleWorkerDuration.
criticalTime := time.Now().Add(-maxIdleWorkerDuration)
currentTime := time.Now()

wp.lock.Lock()
ready := wp.ready
n := len(ready)

// Use binary-search algorithm to find out the index of the least recently worker which can be cleaned up.
l, r, mid := 0, n-1, 0
for l <= r {
mid = (l + r) / 2
if criticalTime.After(wp.ready[mid].lastUseTime) {
l = mid + 1
} else {
r = mid - 1
i := 0
for i < n && currentTime.Sub(ready[i].lastUseTime) > maxIdleWorkerDuration {
i++
}
*scratch = append((*scratch)[:0], ready[:i]...)
if i > 0 {
m := copy(ready, ready[i:])
for i = m; i < n; i++ {
ready[i] = nil
}
wp.ready = ready[:m]
}
i := r
if i == -1 {
wp.lock.Unlock()
return
}

*scratch = append((*scratch)[:0], ready[:i+1]...)
m := copy(ready, ready[i+1:])
for i = m; i < n; i++ {
ready[i] = nil
}
wp.ready = ready[:m]
wp.lock.Unlock()

// Notify obsolete workers to stop.

@@ -137,8 +121,8 @@ func (wp *workerPool) clean(scratch *[]*workerChan) {
// may be blocking and may consume a lot of time if many workers
// are located on non-local CPUs.
tmp := *scratch
for i := range tmp {
tmp[i].ch <- nil
for i, ch := range tmp {
ch.ch <- nil
tmp[i] = nil
}
}

@@ -190,6 +174,11 @@ func (wp *workerPool) getCh() *workerChan {
return nil
}
vch := wp.workerChanPool.Get()
if vch == nil {
vch = &workerChan{
ch: make(chan net.Conn, workerChanCap),
}
}
ch = vch.(*workerChan)
go func() {
wp.workerFunc(ch)

@@ -225,7 +214,6 @@ func (wp *workerPool) workerFunc(ch *workerChan) {
if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") ||
strings.Contains(errStr, "reset by peer") ||
strings.Contains(errStr, "request headers: small read buffer") ||
strings.Contains(errStr, "unexpected EOF") ||
strings.Contains(errStr, "i/o timeout")) {
wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
}

@@ -233,7 +221,7 @@ func (wp *workerPool) workerFunc(ch *workerChan) {
if err == errHijacked {
wp.connState(c, StateHijacked)
} else {
_ = c.Close()
c.Close()
wp.connState(c, StateClosed)
}
c = nil
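In the clean() hunk above, the newer version locates stale workers with a binary search over wp.ready (ordered by lastUseTime, oldest first), while the downgraded code walks the slice linearly. A standalone sketch of the binary-search idea with hypothetical names; only the cut-off search mirrors the removed code:

```go
package main

import (
	"fmt"
	"time"
)

type workerChan struct {
	lastUseTime time.Time
}

// idleCutoff returns how many leading entries of ready (ordered oldest-first)
// were last used before criticalTime, using the same binary search as the
// newer workerPool.clean.
func idleCutoff(ready []*workerChan, criticalTime time.Time) int {
	l, r := 0, len(ready)-1
	for l <= r {
		mid := (l + r) / 2
		if criticalTime.After(ready[mid].lastUseTime) {
			l = mid + 1
		} else {
			r = mid - 1
		}
	}
	// r is the index of the last stale worker, so r+1 entries should be cleaned.
	return r + 1
}

func main() {
	now := time.Now()
	ready := []*workerChan{
		{lastUseTime: now.Add(-30 * time.Second)},
		{lastUseTime: now.Add(-20 * time.Second)},
		{lastUseTime: now.Add(-5 * time.Second)},
		{lastUseTime: now.Add(-1 * time.Second)},
	}
	criticalTime := now.Add(-10 * time.Second) // workers idle longer than 10s are stale
	fmt.Println(idleCutoff(ready, criticalTime)) // prints 2
}
```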
vendor/modules.txt
vendored
2
vendor/modules.txt
vendored
|
@ -96,7 +96,7 @@ github.com/klauspost/compress/zstd
|
|||
github.com/klauspost/compress/zstd/internal/xxhash
|
||||
# github.com/valyala/bytebufferpool v1.0.0
|
||||
github.com/valyala/bytebufferpool
|
||||
# github.com/valyala/fasthttp v1.12.0
|
||||
# github.com/valyala/fasthttp v1.2.0
|
||||
github.com/valyala/fasthttp
|
||||
github.com/valyala/fasthttp/fasthttputil
|
||||
github.com/valyala/fasthttp/stackless
|
||||
|
|