vendor: make vendor-update

Aliaksandr Valialkin 2022-09-26 15:44:55 +03:00
parent a2431c2a88
commit 1bac96dfce
GPG key ID: A72BEC6CD3D0DED1
38 changed files with 624 additions and 427 deletions

go.mod

@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
 go 1.19
 
 require (
-	cloud.google.com/go/storage v1.26.0
+	cloud.google.com/go/storage v1.27.0
 	github.com/VictoriaMetrics/fastcache v1.12.0
 
 	// Do not use the original github.com/valyala/fasthttp because of issues
@@ -11,12 +11,12 @@ require (
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.22.2
 	github.com/VictoriaMetrics/metricsql v0.45.0
-	github.com/aws/aws-sdk-go v1.44.102
+	github.com/aws/aws-sdk-go v1.44.105
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cheggaaa/pb/v3 v3.1.0
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.10.0
-	github.com/klauspost/compress v1.15.10
+	github.com/klauspost/compress v1.15.11
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
 	github.com/urfave/cli/v2 v2.16.3
 	github.com/valyala/fastjson v1.6.3
@@ -24,10 +24,10 @@ require (
 	github.com/valyala/fasttemplate v1.2.1
 	github.com/valyala/gozstd v1.17.0
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9
+	golang.org/x/net v0.0.0-20220923203811-8be639271d50
 	golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1
 	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
-	google.golang.org/api v0.96.0
+	google.golang.org/api v0.97.0
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -52,7 +52,7 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_golang v1.13.0 // indirect
@@ -67,11 +67,11 @@ require (
 	go.opencensus.io v0.23.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
-	golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect
+	golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect
+	google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc // indirect
 	google.golang.org/grpc v1.49.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 )

go.sum

@@ -62,8 +62,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
-cloud.google.com/go/storage v1.26.0 h1:lYAGjknyDJirSzfwUlkv4Nsnj7od7foxQNH/fqZqles=
-cloud.google.com/go/storage v1.26.0/go.mod h1:mk/N7YwIKEWyTvXAWQCIeiCTdLoRH6Pd5xmSnolQLTI=
+cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -149,8 +149,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.102 h1:6tUCTGL2UDbFZae1TLGk8vTgeXuzkb8KbAe2FiAeKHc=
-github.com/aws/aws-sdk-go v1.44.102/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.105 h1:UUwoD1PRKIj3ltrDUYTDQj5fOTK3XsnqolLpRTMmSEM=
+github.com/aws/aws-sdk-go v1.44.105/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -578,8 +578,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo=
-github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -633,8 +633,9 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV
 github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
@@ -1013,8 +1014,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8=
-golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220923203811-8be639271d50 h1:vKyz8L3zkd+xrMeIaBsQ/MNVPVFSffdaU3ZyYlBGFnI=
+golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1051,8 +1052,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A=
-golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc=
+golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1296,8 +1297,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.96.0 h1:F60cuQPJq7K7FzsxMYHAUJSiXh2oKctHxBMbDygxhfM=
-google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1389,8 +1390,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP
 google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg=
-google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc h1:saaNe2+SBQxandnzcD/qB1JEBQ2Pqew+KlFLLdA/XcM=
+google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc/go.mod h1:yEEpwVWKMZZzo81NwRgyEJnA2fQvpXAYPVisv8EgDVs=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=


@@ -1,3 +1,3 @@
 {
-    "storage": "1.26.0"
+    "storage": "1.27.0"
 }


@@ -1,6 +1,13 @@
 # Changes
 
+## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)
+
+### Features
+
+* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8))
+
 ## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)


@@ -2,7 +2,7 @@
 - [About Cloud Storage](https://cloud.google.com/storage/)
 - [API documentation](https://cloud.google.com/storage/docs)
-- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
+- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest)
 - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
 
 ### Example Usage


@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
+	"strings"
 	"time"
 
 	"cloud.google.com/go/compute/metadata"
@@ -159,22 +160,17 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
 }
 
 // SignedURL returns a URL for the specified object. Signed URLs allow anyone
-// access to a restricted resource for a limited time without needing a
-// Google account or signing in. For more information about signed URLs, see
-// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
-//
-// This method only requires the Method and Expires fields in the specified
-// SignedURLOptions opts to be non-nil. If not provided, it attempts to fill the
-// GoogleAccessID and PrivateKey from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// If you are authenticating with a custom HTTP client, Service Account based
-// auto-detection will be hindered.
-//
-// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
-// This requires the IAM Service Account Credentials API to be enabled
-// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
-// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
-// If you do not want these fields set for you, you may pass them in through opts or use
-// SignedURL(bucket, name string, opts *SignedURLOptions) instead.
+// access to a restricted resource for a limited time without needing a Google
+// account or signing in.
+// For more information about signed URLs, see "[Overview of access control]."
+//
+// This method requires the Method and Expires fields in the specified
+// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and
+// PrivateKey fields in some cases. Read more on the [automatic detection of credentials]
+// for this method.
+//
+// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
+// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4]
 func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
 	if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
 		return SignedURL(b.name, object, opts)
@@ -212,18 +208,11 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
 // GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
 // The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
 //
-// This method only requires the Expires field in the specified PostPolicyV4Options
-// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
-// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// If you are authenticating with a custom HTTP client, Service Account based
-// auto-detection will be hindered.
-//
-// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
-// This requires the IAM Service Account Credentials API to be enabled
-// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
-// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
-// If you do not want these fields set for you, you may pass them in through opts or use
-// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
+// This method requires the Expires field in the specified PostPolicyV4Options
+// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
+// in some cases. Read more on the [automatic detection of credentials] for this method.
+//
+// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4]
 func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
 	if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
 		return GenerateSignedPostPolicyV4(b.name, object, opts)
@@ -263,17 +252,27 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 	if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
 		var sa struct {
-			ClientEmail string `json:"client_email"`
-		}
-		err := json.Unmarshal(b.c.creds.JSON, &sa)
-		if err == nil && sa.ClientEmail != "" {
-			return sa.ClientEmail, nil
-		} else if err != nil {
-			returnErr = err
-		} else {
-			returnErr = errors.New("storage: empty client email in credentials")
+			ClientEmail        string `json:"client_email"`
+			SAImpersonationURL string `json:"service_account_impersonation_url"`
+			CredType           string `json:"type"`
 		}
+
+		err := json.Unmarshal(b.c.creds.JSON, &sa)
+		if err != nil {
+			returnErr = err
+		} else if sa.CredType == "impersonated_service_account" {
+			start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
+
+			if end <= start {
+				returnErr = errors.New("error parsing impersonated service account credentials")
+			} else {
+				return sa.SAImpersonationURL[start+1 : end], nil
+			}
+		} else if sa.CredType == "service_account" && sa.ClientEmail != "" {
+			return sa.ClientEmail, nil
+		} else {
+			returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
+		}
 	}
 
 	// Don't error out if we can't unmarshal, fallback to GCE check.
@@ -284,11 +283,11 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 		} else if err != nil {
 			returnErr = err
 		} else {
-			returnErr = errors.New("got empty email from GCE metadata service")
+			returnErr = errors.New("empty email from GCE metadata service")
 		}
 	}
-	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %v", returnErr)
+	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
 }
 func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
@@ -776,6 +775,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
 		LocationType:          b.GetLocationType(),
 		RPO:                   toRPOFromProto(b),
 		CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
+		ProjectNumber:         parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based
 	}
 }
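The impersonated-credentials branch added to detectDefaultGoogleAccessID above derives the signing email from the service_account_impersonation_url field. A minimal standalone sketch of that extraction, assuming the URL has the shape gcloud produces (the sample URL and helper name here are illustrative, not part of the vendored package):

	package main

	import (
		"errors"
		"fmt"
		"strings"
	)

	// extractEmail mirrors the vendored logic: the service account email sits
	// between the last '/' and the last ':' of the impersonation URL.
	func extractEmail(impersonationURL string) (string, error) {
		start := strings.LastIndex(impersonationURL, "/")
		end := strings.LastIndex(impersonationURL, ":")
		if end <= start {
			return "", errors.New("error parsing impersonated service account credentials")
		}
		return impersonationURL[start+1 : end], nil
	}

	func main() {
		// Hypothetical impersonation URL.
		u := "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/sa@my-project.iam.gserviceaccount.com:generateAccessToken"
		email, err := extractEmail(u)
		if err != nil {
			panic(err)
		}
		fmt.Println(email) // sa@my-project.iam.gserviceaccount.com
	}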


@@ -24,7 +24,7 @@ connection pooling and similar aspects of this package.
 
 # Creating a Client
 
-To start working with this package, create a client:
+To start working with this package, create a [Client]:
 
 	ctx := context.Background()
 	client, err := storage.NewClient(ctx)
@@ -33,7 +33,7 @@ To start working with this package, create a client:
 	}
 
 The client will use your default application credentials. Clients should be
-reused instead of created as needed. The methods of Client are safe for
+reused instead of created as needed. The methods of [Client] are safe for
 concurrent use by multiple goroutines.
 
 If you only wish to access public data, you can create
@@ -75,7 +75,7 @@ bucket, make a bucket handle:
 
 A handle is a reference to a bucket. You can have a handle even if the
 bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
-call Create on the handle:
+call [BucketHandle.Create]:
 
 	if err := bkt.Create(ctx, projectID, nil); err != nil {
 		// TODO: Handle error.
@@ -85,9 +85,9 @@ Note that although buckets are associated with projects, bucket names are
 global across all projects.
 
 Each bucket has associated metadata, represented in this package by
-BucketAttrs. The third argument to BucketHandle.Create allows you to set
-the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
-Attrs:
+[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
+the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
+[BucketHandle.Attrs]:
 
 	attrs, err := bkt.Attrs(ctx)
 	if err != nil {
@@ -101,8 +101,8 @@ Attrs:
 An object holds arbitrary data as a sequence of bytes, like a file. You
 refer to objects using a handle, just as with buckets, but unlike buckets
 you don't explicitly create an object. Instead, the first time you write
-to an object it will be created. You can use the standard Go io.Reader
-and io.Writer interfaces to read and write object data:
+to an object it will be created. You can use the standard Go [io.Reader]
+and [io.Writer] interfaces to read and write object data:
 
 	obj := bkt.Object("data")
 	// Write something to obj.
@@ -128,7 +128,7 @@ and io.Writer interfaces to read and write object data:
 	}
 	// Prints "This object contains text."
 
-Objects also have attributes, which you can fetch with Attrs:
+Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:
 
 	objAttrs, err := obj.Attrs(ctx)
 	if err != nil {
@@ -139,7 +139,7 @@ Objects also have attributes, which you can fetch with Attrs:
 
 # Listing objects
 
-Listing objects in a bucket is done with the Bucket.Objects method:
+Listing objects in a bucket is done with the [BucketHandle.Objects] method:
 
 	query := &storage.Query{Prefix: ""}
@@ -157,7 +157,7 @@ Listing objects in a bucket is done with the Bucket.Objects method:
 	}
 
 Objects are listed lexicographically by name. To filter objects
-lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
+lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:
 
 	query := &storage.Query{
 		Prefix: "",
@@ -168,7 +168,7 @@ lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
 	// ... as before
 
 If only a subset of object attributes is needed when listing, specifying this
-subset using Query.SetAttrSelection may speed up the listing process:
+subset using [Query.SetAttrSelection] may speed up the listing process:
 
 	query := &storage.Query{Prefix: ""}
 	query.SetAttrSelection([]string{"Name"})
@@ -180,10 +180,9 @@ subset using Query.SetAttrSelection may speed up the listing process:
 
 Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
 ACLRules, each of which specifies the role of a user, group or project. ACLs
 are suitable for fine-grained control, but you may prefer using IAM to control
-access at the project level (see
-https://cloud.google.com/storage/docs/access-control/iam).
+access at the project level (see [Cloud Storage IAM docs]).
 
-To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
+To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:
 
 	acls, err := obj.ACL().List(ctx)
 	if err != nil {
@@ -199,7 +198,7 @@ You can also set and delete ACLs.
 
 Every object has a generation and a metageneration. The generation changes
 whenever the content changes, and the metageneration changes whenever the
-metadata changes. Conditions let you check these values before an operation;
+metadata changes. [Conditions] let you check these values before an operation;
 the operation only executes if the conditions match. You can use conditions to
 prevent race conditions in read-modify-write operations.
@@ -214,8 +213,8 @@ since you read it. Here is how to express that:
 
 You can obtain a URL that lets anyone read or write an object for a limited time.
 Signing a URL requires credentials authorized to sign a URL. To use the same
-authentication that was used when instantiating the Storage client, use the
-BucketHandle.SignedURL method.
+authentication that was used when instantiating the Storage client, use
+[BucketHandle.SignedURL].
 
 	url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
 	if err != nil {
@@ -223,8 +222,8 @@ BucketHandle.SignedURL method.
 	}
 	fmt.Println(url)
 
-You can also sign a URL wihout creating a client. See the documentation of
-SignedURL for details.
+You can also sign a URL without creating a client. See the documentation of
+[SignedURL] for details.
 
 	url, err := storage.SignedURL(bucketName, "shared-object", opts)
 	if err != nil {
@@ -238,8 +237,8 @@ A type of signed request that allows uploads through HTML forms directly to Clou
 temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
 by a user.
 
-For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
-as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
+For more information, please see the [XML POST Object docs] as well
+as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].
 
 	pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
 	if err != nil {
@@ -247,19 +246,40 @@ as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
 	}
 	fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
 
+# Credential requirements for signing
+
+If the GoogleAccessID and PrivateKey option fields are not provided, they will
+be automatically detected by [BucketHandle.SignedURL] and
+[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
+  - you are authenticated to the Storage Client with a service account's
+    downloaded private key, either directly in code or by setting the
+    GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
+  - your application is running on Google Compute Engine (GCE), or
+  - you are logged into [gcloud using application default credentials]
+    with [impersonation enabled].
+
+Detecting GoogleAccessID may not be possible if you are authenticated using a
+token source or using [option.WithHTTPClient]. In this case, you can provide a
+service account email for GoogleAccessID and the client will attempt to sign
+the URL or Post Policy using that service account.
+
+To generate the signature, you must have:
+  - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
+    account, and
+  - the [IAM Service Account Credentials API] enabled (unless authenticating
+    with a downloaded private key).
+
 # Errors
 
-Errors returned by this client are often of the type googleapi.Error.
-These errors can be introspected for more information by using errors.As
-with the richer googleapi.Error type. For example:
+Errors returned by this client are often of the type [googleapi.Error].
+These errors can be introspected for more information by using [errors.As]
+with the richer [googleapi.Error] type. For example:
 
 	var e *googleapi.Error
 	if ok := errors.As(err, &e); ok {
 		if e.Code == 409 { ... }
 	}
 
-See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
-
 # Retrying failed requests
 
 Methods in this package may retry calls that fail with transient errors.
@@ -270,12 +290,12 @@ continuing, use context timeouts or cancellation.
 The retry strategy in this library follows best practices for Cloud Storage. By
 default, operations are retried only if they are idempotent, and exponential
 backoff with jitter is employed. In addition, errors are only retried if they
-are defined as transient by the service. See
-https://cloud.google.com/storage/docs/retry-strategy for more information.
+are defined as transient by the service. See the [Cloud Storage retry docs]
+for more information.
 
 Users can configure non-default retry behavior for a single library call (using
-BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a
-client (using Client.SetRetry). For example:
+[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
+client (using [Client.SetRetry]). For example:
 
 	o := client.Bucket(bucket).Object(object).Retryer(
 		// Use WithBackoff to change the timing of the exponential backoff.
@@ -296,5 +316,13 @@ client (using [Client.SetRetry]). For example:
 	if err := o.Delete(ctx); err != nil {
 		// Handle err.
 	}
+
+[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
+[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
+[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
+[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
+[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
+[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
+[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
 */
 package storage // import "cloud.google.com/go/storage"
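Per the new "Credential requirements for signing" section above, only the Method and Expires fields of SignedURLOptions must be set when credentials can be auto-detected. A hedged usage sketch with placeholder bucket and object names:

	package main

	import (
		"context"
		"fmt"
		"time"

		"cloud.google.com/go/storage"
	)

	func main() {
		ctx := context.Background()
		client, err := storage.NewClient(ctx)
		if err != nil {
			// TODO: Handle error.
		}
		defer client.Close()

		// GoogleAccessID and PrivateKey are omitted; with v1.27.0 they are
		// auto-detected for service_account and impersonated_service_account
		// credentials, or on GCE.
		opts := &storage.SignedURLOptions{
			Method:  "GET",
			Expires: time.Now().Add(15 * time.Minute),
		}
		url, err := client.Bucket("my-bucket").SignedURL("my-object", opts)
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(url)
	}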


@@ -155,11 +155,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st
 	}
 	req := &storagepb.CreateBucketRequest{
 		Parent:   toProjectResource(project),
 		Bucket:   b,
 		BucketId: b.GetName(),
-		PredefinedAcl:              attrs.PredefinedACL,
-		PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
+	}
+	if attrs != nil {
+		req.PredefinedAcl = attrs.PredefinedACL
+		req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL
 	}
 
 	var battrs *BucketAttrs
@@ -893,6 +895,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
 		}
 
 		msg, err = stream.Recv()
+		// These types of errors show up on the Recv call, rather than the
+		// initialization of the stream via ReadObject above.
+		if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+			return ErrObjectNotExist
+		}
 
 		return err
 	}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
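The NewRangeReader change above turns a gRPC NotFound surfacing on Recv into the package's ErrObjectNotExist sentinel. A standalone sketch of the same status-inspection pattern; the sentinel below is a stand-in for the real storage.ErrObjectNotExist:

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// errObjectNotExist stands in for storage.ErrObjectNotExist.
	var errObjectNotExist = errors.New("storage: object doesn't exist")

	// mapRecvError converts a gRPC NotFound status into the sentinel error,
	// mirroring the check added after stream.Recv in the vendored code.
	func mapRecvError(err error) error {
		if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
			return errObjectNotExist
		}
		return err
	}

	func main() {
		err := status.Error(codes.NotFound, "object not found")
		fmt.Println(errors.Is(mapRecvError(err), errObjectNotExist)) // true
	}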


@@ -26,6 +26,11 @@
 // To get started with this package, create a client.
 //
 //	ctx := context.Background()
+//	// This snippet has been automatically generated and should be regarded as a code template only.
+//	// It will require modifications to work:
+//	// - It may require correct/in-range values for request initialization.
+//	// - It may require specifying regional endpoints when creating the service client as shown in:
+//	//   https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
 //	c, err := storage.NewClient(ctx)
 //	if err != nil {
 //		// TODO: Handle error.
@@ -41,6 +46,11 @@
 // The following is an example of making an API call with the newly created client.
 //
 //	ctx := context.Background()
+//	// This snippet has been automatically generated and should be regarded as a code template only.
+//	// It will require modifications to work:
+//	// - It may require correct/in-range values for request initialization.
+//	// - It may require specifying regional endpoints when creating the service client as shown in:
+//	//   https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
 //	c, err := storage.NewClient(ctx)
 //	if err != nil {
 //		// TODO: Handle error.


@@ -206,7 +206,8 @@ func (c *Client) setGoogleClientInfo(keyval ...string) {
 
 // Connection returns a connection to the API service.
 //
-// Deprecated.
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
 func (c *Client) Connection() *grpc.ClientConn {
 	return c.internalClient.Connection()
 }
@@ -518,7 +519,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
 
 // Connection returns a connection to the API service.
 //
-// Deprecated.
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
 func (c *gRPCClient) Connection() *grpc.ClientConn {
 	return c.connPool.Conn()
 }


@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.26.0"
+const Version = "1.27.0"


@@ -7,5 +7,6 @@
     "storage": {
       "component": "storage"
     }
-  }
+  },
+  "plugins": ["sentence-case"]
 }


@@ -33,6 +33,7 @@ import (
 	"reflect"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
 	"time"
 	"unicode/utf8"
@@ -2002,6 +2003,24 @@ func parseBucketName(b string) string {
 	return b[sep+1:]
 }
 
+// parseProjectNumber consume the given resource name and parses out the project
+// number if one is present i.e. it is not a project ID.
+func parseProjectNumber(r string) uint64 {
+	projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`)
+	if matches := projectID.FindStringSubmatch(r); len(matches) > 0 {
+		// Capture group follows the matched segment. For example:
+		// input: projects/123/bars/456
+		// output: [projects/123/, 123]
+		number, err := strconv.ParseUint(matches[1], 10, 64)
+		if err != nil {
+			return 0
+		}
+		return number
+	}
+	return 0
+}
+
 // toProjectResource accepts a project ID and formats it as a Project resource
 // name.
 func toProjectResource(project string) string {
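A standalone sketch of how the parseProjectNumber helper added above behaves, with illustrative inputs (the helper is unexported, so this re-creates the same regex logic):

	package main

	import (
		"fmt"
		"regexp"
		"strconv"
	)

	var projectNumberRe = regexp.MustCompile(`projects/([0-9]+)/?`)

	// parseProjectNumber mirrors the vendored helper: digits after "projects/"
	// parse as a number; anything else (a project ID) yields 0.
	func parseProjectNumber(r string) uint64 {
		if matches := projectNumberRe.FindStringSubmatch(r); len(matches) > 0 {
			n, err := strconv.ParseUint(matches[1], 10, 64)
			if err != nil {
				return 0
			}
			return n
		}
		return 0
	}

	func main() {
		fmt.Println(parseProjectNumber("projects/123/buckets/b")) // 123
		fmt.Println(parseProjectNumber("projects/my-project-id")) // 0
	}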


@@ -5822,12 +5822,42 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "fips-us-east-1",
+			}: endpoint{
+				Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-west-2",
+			}: endpoint{
+				Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
+			},
 		},
 	},
 	"contact-lens": service{
@@ -7283,6 +7313,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -9484,6 +9517,18 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -9496,6 +9541,15 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "fips-us-east-2",
+			}: endpoint{
+				Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "fips-us-west-2",
 			}: endpoint{
@@ -9505,6 +9559,9 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "sa-east-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
@@ -9514,6 +9571,15 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
 			},
+			endpointKey{
+				Region: "us-east-2",
+			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
@@ -12682,6 +12748,18 @@ var awsPartition = partition{
 		},
 	},
 	"ivschat": service{
 		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "ap-northeast-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -17529,6 +17607,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
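The FIPS variants added above become reachable through the SDK's endpoint resolver. A hedged sketch, assuming the UseFIPSEndpoint resolver option present in recent aws-sdk-go releases:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		resolver := endpoints.DefaultResolver()
		// Ask for the FIPS variant of connect-campaigns in us-east-1,
		// one of the entries this update adds to the endpoint table.
		ep, err := resolver.EndpointFor("connect-campaigns", "us-east-1",
			func(o *endpoints.Options) {
				o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
			})
		if err != nil {
			panic(err)
		}
		fmt.Println(ep.URL) // https://connect-campaigns-fips.us-east-1.amazonaws.com
	}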


@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.102"
+const SDKVersion = "1.44.105"


@@ -17,6 +17,17 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Sept 16, 2022 (v1.15.10)
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
 * July 21, 2022 (v1.15.9)
 	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
@@ -97,15 +108,15 @@ This package provides various compression algorithms.
 	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
 	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
 
+<details>
+	<summary>See Details</summary>
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 
 Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
 
 While the release has been extensively tested, it is recommended to testing when upgrading.
+</details>
+
+<details>
+	<summary>See changes to v1.14.x</summary>
 
 * Feb 22, 2022 (v1.14.4)
 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
@@ -131,6 +142,7 @@ While the release has been extensively tested, it is recommended to testing when
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
 
 <details>
 	<summary>See changes to v1.13.x</summary>

@@ -374,6 +374,12 @@ func hash4(b []byte) uint32 {
 	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
 }
 
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> (32 - h)
+}
+
 // bulkHash4 will compute hashes using the same
 // algorithm as hash4
 func bulkHash4(b []byte, dst []uint32) {


@ -58,17 +58,6 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463 prime8bytes = 0xcf1bbcdcb7a56463
) )
func load32(b []byte, i int) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:]) return binary.LittleEndian.Uint32(b[i:])
} }
@ -77,10 +66,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:]) return binary.LittleEndian.Uint64(b[i:])
} }
func hash(u uint32) uint32 {
return (u * 0x1e35a7bd) >> tableShift
}
type tableEntry struct { type tableEntry struct {
offset int32 offset int32
} }
@ -115,39 +100,36 @@ func (e *fastGen) addBlock(src []byte) int32 {
return s return s
} }
// hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
return (u * prime4bytes) >> (32 - h)
}
type tableEntryPrev struct { type tableEntryPrev struct {
Cur tableEntry Cur tableEntry
Prev tableEntry Prev tableEntry
} }
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64. // Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 { func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
} }
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
}

// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
}

// hashLen returns a hash of the lowest mls bytes of u with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
	switch mls {
	case 3:
		return (uint32(u<<8) * prime3bytes) >> (32 - length)
	case 5:
		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
	case 6:
		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
	case 7:
		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
	case 8:
		return uint32((u * prime8bytes) >> (64 - length))
	default:
		return (uint32(u) * prime4bytes) >> (32 - length)
	}
}
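The interesting part of the new `hashLen` is how each case isolates the lowest `mls` bytes: shifting left by `64 - 8*mls` pushes everything above those bytes out of the register before the multiply. A small sketch of the `mls == 5` case; the `prime5bytes` value is an assumption from memory of the package source:

```go
package main

import "fmt"

// Assumed value; the real constant is defined alongside hashLen.
const prime5bytes = 889523592379

// Mirrors the mls==5 case: u<<(64-40) discards the upper 3 bytes,
// so inputs differing only above the lowest 5 bytes hash identically.
func hashLen5(u uint64, length uint8) uint32 {
	return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
}

func main() {
	a := uint64(0x1122334455667788)
	b := uint64(0xFFFF334455667788) // differs only in the top two bytes
	fmt.Println(hashLen5(a, 17), hashLen5(b, 17)) // identical hashes
}
```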
// matchlen will return the match length between offsets and t in src. // matchlen will return the match length between offsets and t in src.


@ -19,6 +19,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 12 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur)) panic(fmt.Sprint("e.cur < 0: ", e.cur))
@ -68,7 +69,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin) sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from. // nextEmit is where in src the next emitLiteral should start from.
cv := load3232(src, s) cv := load6432(src, s)
for { for {
const skipLog = 5 const skipLog = 5
@ -77,7 +78,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
nextS := s nextS := s
var candidate tableEntry var candidate tableEntry
for { for {
nextHash := hash(cv) nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash] candidate = e.table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit { if nextS > sLimit {
@ -86,16 +87,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
now := load6432(src, nextS) now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur} e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hash(uint32(now)) nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur) offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur} e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break break
} }
// Do one right away... // Do one right away...
cv = uint32(now) cv = now
s = nextS s = nextS
nextS++ nextS++
candidate = e.table[nextHash] candidate = e.table[nextHash]
@ -103,11 +104,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur} e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur) offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur} e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break break
} }
cv = uint32(now) cv = now
s = nextS s = nextS
} }
@ -198,9 +199,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
} }
if s >= sLimit { if s >= sLimit {
// Index first pair after match end. // Index first pair after match end.
if int(s+l+4) < len(src) { if int(s+l+8) < len(src) {
cv := load3232(src, s) cv := load6432(src, s)
e.table[hash(cv)] = tableEntry{offset: s + e.cur} e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
} }
goto emitRemainder goto emitRemainder
} }
@ -213,16 +214,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// three load32 calls. // three load32 calls.
x := load6432(src, s-2) x := load6432(src, s-2)
o := e.cur + s - 2 o := e.cur + s - 2
prevHash := hash(uint32(x)) prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o} e.table[prevHash] = tableEntry{offset: o}
x >>= 16 x >>= 16
currHash := hash(uint32(x)) currHash := hashLen(x, tableBits, hashBytes)
candidate = e.table[currHash] candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2} e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur) offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
cv = uint32(x >> 8) cv = x >> 8
s++ s++
break break
} }


@ -16,6 +16,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 12 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
@ -66,7 +67,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin) sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from. // nextEmit is where in src the next emitLiteral should start from.
cv := load3232(src, s) cv := load6432(src, s)
for { for {
// When should we start skipping if we haven't found matches in a long while. // When should we start skipping if we haven't found matches in a long while.
const skipLog = 5 const skipLog = 5
@ -75,7 +76,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
nextS := s nextS := s
var candidate tableEntry var candidate tableEntry
for { for {
nextHash := hash4u(cv, bTableBits) nextHash := hashLen(cv, bTableBits, hashBytes)
s = nextS s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit { if nextS > sLimit {
@ -84,16 +85,16 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
candidate = e.table[nextHash] candidate = e.table[nextHash]
now := load6432(src, nextS) now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur} e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hash4u(uint32(now), bTableBits) nextHash = hashLen(now, bTableBits, hashBytes)
offset := s - (candidate.offset - e.cur) offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur} e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break break
} }
// Do one right away... // Do one right away...
cv = uint32(now) cv = now
s = nextS s = nextS
nextS++ nextS++
candidate = e.table[nextHash] candidate = e.table[nextHash]
@ -101,10 +102,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur} e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur) offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break break
} }
cv = uint32(now) cv = now
} }
// A 4-byte match has been found. We'll later see if more than 4 bytes // A 4-byte match has been found. We'll later see if more than 4 bytes
@ -154,9 +155,9 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
if s >= sLimit { if s >= sLimit {
// Index first pair after match end. // Index first pair after match end.
if int(s+l+4) < len(src) { if int(s+l+8) < len(src) {
cv := load3232(src, s) cv := load6432(src, s)
e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
} }
goto emitRemainder goto emitRemainder
} }
@ -164,15 +165,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// Store every second hash in-between, but offset by 1. // Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 { for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, i) x := load6432(src, i)
nextHash := hash4u(uint32(x), bTableBits) nextHash := hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i} e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one // Skip one
x >>= 16 x >>= 16
nextHash = hash4u(uint32(x), bTableBits) nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 2} e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
// Skip one // Skip one
x >>= 16 x >>= 16
nextHash = hash4u(uint32(x), bTableBits) nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 4} e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
} }
@ -184,17 +185,17 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// three load32 calls. // three load32 calls.
x := load6432(src, s-2) x := load6432(src, s-2)
o := e.cur + s - 2 o := e.cur + s - 2
prevHash := hash4u(uint32(x), bTableBits) prevHash := hashLen(x, bTableBits, hashBytes)
prevHash2 := hash4u(uint32(x>>8), bTableBits) prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o} e.table[prevHash] = tableEntry{offset: o}
e.table[prevHash2] = tableEntry{offset: o + 1} e.table[prevHash2] = tableEntry{offset: o + 1}
currHash := hash4u(uint32(x>>16), bTableBits) currHash := hashLen(x>>16, bTableBits, hashBytes)
candidate = e.table[currHash] candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2} e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur) offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
cv = uint32(x >> 24) cv = x >> 24
s++ s++
break break
} }


@ -11,10 +11,11 @@ type fastEncL3 struct {
// Encode uses a similar algorithm to level 2, will check up to two candidates. // Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) { func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 8 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
tableBits = 16 tableBits = 16
tableSize = 1 << tableBits tableSize = 1 << tableBits
hashBytes = 5
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
@ -69,20 +70,20 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin) sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from. // nextEmit is where in src the next emitLiteral should start from.
cv := load3232(src, s) cv := load6432(src, s)
for { for {
const skipLog = 6 const skipLog = 7
nextS := s nextS := s
var candidate tableEntry var candidate tableEntry
for { for {
nextHash := hash4u(cv, tableBits) nextHash := hashLen(cv, tableBits, hashBytes)
s = nextS s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit { if nextS > sLimit {
goto emitRemainder goto emitRemainder
} }
candidates := e.table[nextHash] candidates := e.table[nextHash]
now := load3232(src, nextS) now := load6432(src, nextS)
// Safe offset distance until s + 4... // Safe offset distance until s + 4...
minOffset := e.cur + s - (maxMatchOffset - 4) minOffset := e.cur + s - (maxMatchOffset - 4)
@ -96,8 +97,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
continue continue
} }
if cv == load3232(src, candidate.offset-e.cur) { if uint32(cv) == load3232(src, candidate.offset-e.cur) {
if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
break break
} }
// Both match and are valid, pick longest. // Both match and are valid, pick longest.
@ -112,7 +113,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// We only check if value mismatches. // We only check if value mismatches.
// Offset will always be invalid in other cases. // Offset will always be invalid in other cases.
candidate = candidates.Prev candidate = candidates.Prev
if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break break
} }
} }
@ -164,9 +165,9 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
if s >= sLimit { if s >= sLimit {
t += l t += l
// Index first pair after match end. // Index first pair after match end.
if int(t+4) < len(src) && t > 0 { if int(t+8) < len(src) && t > 0 {
cv := load3232(src, t) cv = load6432(src, t)
nextHash := hash4u(cv, tableBits) nextHash := hashLen(cv, tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{ e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur, Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t}, Cur: tableEntry{offset: e.cur + t},
@ -176,8 +177,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
} }
// Store every 5th hash in-between. // Store every 5th hash in-between.
for i := s - l + 2; i < s-5; i += 5 { for i := s - l + 2; i < s-5; i += 6 {
nextHash := hash4u(load3232(src, i), tableBits) nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{ e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur, Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + i}} Cur: tableEntry{offset: e.cur + i}}
@ -185,23 +186,23 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// We could immediately start working at s now, but to improve // We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 to s. // compression we first update the hash table at s-2 to s.
x := load6432(src, s-2) x := load6432(src, s-2)
prevHash := hash4u(uint32(x), tableBits) prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{ e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur, Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2}, Cur: tableEntry{offset: e.cur + s - 2},
} }
x >>= 8 x >>= 8
prevHash = hash4u(uint32(x), tableBits) prevHash = hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{ e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur, Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1}, Cur: tableEntry{offset: e.cur + s - 1},
} }
x >>= 8 x >>= 8
currHash := hash4u(uint32(x), tableBits) currHash := hashLen(x, tableBits, hashBytes)
candidates := e.table[currHash] candidates := e.table[currHash]
cv = uint32(x) cv = x
e.table[currHash] = tableEntryPrev{ e.table[currHash] = tableEntryPrev{
Prev: candidates.Cur, Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur}, Cur: tableEntry{offset: s + e.cur},
@ -212,17 +213,17 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
minOffset := e.cur + s - (maxMatchOffset - 4) minOffset := e.cur + s - (maxMatchOffset - 4)
if candidate.offset > minOffset { if candidate.offset > minOffset {
if cv == load3232(src, candidate.offset-e.cur) { if uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Found a match... // Found a match...
continue continue
} }
candidate = candidates.Prev candidate = candidates.Prev
if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Match at prev... // Match at prev...
continue continue
} }
} }
cv = uint32(x >> 8) cv = x >> 8
s++ s++
break break
} }


@ -12,6 +12,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 12 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur)) panic(fmt.Sprint("e.cur < 0: ", e.cur))
@ -80,7 +81,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
nextS := s nextS := s
var t int32 var t int32
for { for {
nextHashS := hash4x64(cv, tableBits) nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits) nextHashL := hash7(cv, tableBits)
s = nextS s = nextS
@ -168,7 +169,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// Index first pair after match end. // Index first pair after match end.
if int(s+8) < len(src) { if int(s+8) < len(src) {
cv := load6432(src, s) cv := load6432(src, s)
e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
} }
goto emitRemainder goto emitRemainder
@ -183,7 +184,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1} t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2 e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hash4u(uint32(cv>>8), tableBits)] = t2 e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
i += 3 i += 3
for ; i < s-1; i += 3 { for ; i < s-1; i += 3 {
@ -192,7 +193,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1} t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2 e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hash4u(uint32(cv>>8), tableBits)] = t2 e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
} }
} }
} }
@ -201,7 +202,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s. // compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1) x := load6432(src, s-1)
o := e.cur + s - 1 o := e.cur + s - 1
prevHashS := hash4x64(x, tableBits) prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits) prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o} e.table[prevHashS] = tableEntry{offset: o}
e.bTable[prevHashL] = tableEntry{offset: o} e.bTable[prevHashL] = tableEntry{offset: o}


@ -12,6 +12,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 12 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur)) panic(fmt.Sprint("e.cur < 0: ", e.cur))
@ -88,7 +89,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
var l int32 var l int32
var t int32 var t int32
for { for {
nextHashS := hash4x64(cv, tableBits) nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits) nextHashL := hash7(cv, tableBits)
s = nextS s = nextS
@ -105,7 +106,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
eLong := &e.bTable[nextHashL] eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hash4x64(next, tableBits) nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits) nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur t = lCandidate.Cur.offset - e.cur
@ -257,7 +258,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
if i < s-1 { if i < s-1 {
cv := load6432(src, i) cv := load6432(src, i)
t := tableEntry{offset: i + e.cur} t := tableEntry{offset: i + e.cur}
e.table[hash4x64(cv, tableBits)] = t e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)] eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur eLong.Cur, eLong.Prev = t, eLong.Cur
@ -270,7 +271,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// We only have enough bits for a short entry at i+2 // We only have enough bits for a short entry at i+2
cv >>= 8 cv >>= 8
t = tableEntry{offset: t.offset + 1} t = tableEntry{offset: t.offset + 1}
e.table[hash4x64(cv, tableBits)] = t e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's' // Skip one - otherwise we risk hitting 's'
i += 4 i += 4
@ -280,7 +281,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1} t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)] eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hash4u(uint32(cv>>8), tableBits)] = t2 e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
} }
} }
} }
@ -289,7 +290,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s. // compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1) x := load6432(src, s-1)
o := e.cur + s - 1 o := e.cur + s - 1
prevHashS := hash4x64(x, tableBits) prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits) prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o} e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL] eLong := &e.bTable[prevHashL]


@ -12,6 +12,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const ( const (
inputMargin = 12 - 1 inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
) )
if debugDeflate && e.cur < 0 { if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur)) panic(fmt.Sprint("e.cur < 0: ", e.cur))
@ -90,7 +91,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
var l int32 var l int32
var t int32 var t int32
for { for {
nextHashS := hash4x64(cv, tableBits) nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits) nextHashL := hash7(cv, tableBits)
s = nextS s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog nextS = s + doEvery + (s-nextEmit)>>skipLog
@ -107,7 +108,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
eLong.Cur, eLong.Prev = entry, eLong.Cur eLong.Cur, eLong.Prev = entry, eLong.Cur
// Calculate hashes of 'next' // Calculate hashes of 'next'
nextHashS = hash4x64(next, tableBits) nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits) nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur t = lCandidate.Cur.offset - e.cur
@ -286,7 +287,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Index after match end. // Index after match end.
for i := nextS + 1; i < int32(len(src))-8; i += 2 { for i := nextS + 1; i < int32(len(src))-8; i += 2 {
cv := load6432(src, i) cv := load6432(src, i)
e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur} e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
eLong := &e.bTable[hash7(cv, tableBits)] eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
} }
@ -301,7 +302,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1} t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)] eLong := &e.bTable[hash7(cv, tableBits)]
eLong2 := &e.bTable[hash7(cv>>8, tableBits)] eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
e.table[hash4x64(cv, tableBits)] = t e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong.Cur, eLong.Prev = t, eLong.Cur eLong.Cur, eLong.Prev = t, eLong.Cur
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
} }


@ -325,35 +325,35 @@ The content compressed in this mode is fully compatible with the standard decode
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
| File                                                                                                    | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z)                        | 16.33x   | 10556 MB/s    | 8.0%         | 6.04x       | 5252 MB/s           | 14.7%              |
| (1 CPU)                                                                                                 | 1.08x    | 940 MB/s      | -            | 0.46x       | 400 MB/s            | -                  |
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst)     | 16.51x   | 15224 MB/s    | 31.70%       | 9.47x       | 8734 MB/s           | 37.71%             |
| (1 CPU)                                                                                                 | 1.26x    | 1157 MB/s     | -            | 0.60x       | 556 MB/s            | -                  |
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst)             | 15.14x   | 12598 MB/s    | -5.76%       | 6.23x       | 5675 MB/s           | 3.62%              |
| (1 CPU)                                                                                                 | 1.02x    | 932 MB/s      | -            | 0.47x       | 432 MB/s            | -                  |
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst)                         | 11.21x   | 12116 MB/s    | 15.95%       | 3.24x       | 3500 MB/s           | 18.00%             |
| (1 CPU)                                                                                                 | 1.05x    | 1135 MB/s     | -            | 0.27x       | 292 MB/s            | -                  |
| [apache.log](https://files.klauspost.com/compress/apache.log.zst)                                       | 8.55x    | 16673 MB/s    | 20.54%       | 5.85x       | 11420 MB/s          | 24.97%             |
| (1 CPU)                                                                                                 | 1.91x    | 1771 MB/s     | -            | 0.53x       | 1041 MB/s           | -                  |
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z)                                        | 15.76x   | 14357 MB/s    | 24.01%       | 8.67x       | 7891 MB/s           | 33.68%             |
| (1 CPU)                                                                                                 | 1.17x    | 1064 MB/s     | -            | 0.65x       | 595 MB/s            | -                  |
| [10gb.tar](http://mattmahoney.net/dc/10gb.html)                                                         | 13.33x   | 9835 MB/s     | 2.34%        | 6.85x       | 4863 MB/s           | 9.96%              |
| (1 CPU)                                                                                                 | 0.97x    | 689 MB/s      | -            | 0.55x       | 387 MB/s            | -                  |
| sharnd.out.2gb                                                                                          | 9.11x    | 13213 MB/s    | 0.01%        | 1.49x       | 9184 MB/s           | 0.01%              |
| (1 CPU)                                                                                                 | 0.88x    | 5418 MB/s     | -            | 0.77x       | 5417 MB/s           | -                  |
| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x   | 11477 MB/s    | 18.73%       | 11.15x      | 5817 MB/s           | 27.88%             |
| (1 CPU)                                                                                                 | 1.23x    | 642 MB/s      | -            | 0.71x       | 642 MB/s            | -                  |
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip)                                        | 11.23x   | 6520 MB/s     | 5.9%         | 5.35x       | 3109 MB/s           | 15.88%             |
| (1 CPU)                                                                                                 | 1.05x    | 607 MB/s      | -            | 0.52x       | 304 MB/s            | -                  |
| [enwik9](https://files.klauspost.com/compress/enwik9.zst)                                               | 19.28x   | 8440 MB/s     | 4.04%        | 9.31x       | 4076 MB/s           | 18.04%             |
| (1 CPU)                                                                                                 | 1.12x    | 488 MB/s      | -            | 0.57x       | 250 MB/s            | -                  |
### Legend

* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
* `S2 Throughput`: Throughput of S2 in MB/s.
* `S2 % smaller`: How many percent smaller the S2 output is than the Snappy output.
* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
* `"better" throughput`: Throughput of S2 in "better" mode, in MB/s.
@ -361,7 +361,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
@ -404,15 +404,15 @@ The "better" compression mode will actively look for shorter matches, which is w
Without assembly, decompression is also very fast. Single goroutine decompression speed, no assembly:
| File                           | Speed vs. Snappy | S2 throughput |
|--------------------------------|------------------|---------------|
| consensus.db.10gb.s2           | 1.84x            | 2289.8 MB/s   |
| 10gb.tar.s2                    | 1.30x            | 867.07 MB/s   |
| rawstudio-mint14.tar.s2        | 1.66x            | 1329.65 MB/s  |
| github-june-2days-2019.json.s2 | 2.36x            | 1831.59 MB/s  |
| github-ranks-backup.bin.s2     | 1.73x            | 1390.7 MB/s   |
| enwik9.s2                      | 1.67x            | 681.53 MB/s   |
| adresser.json.s2               | 3.41x            | 4230.53 MB/s  |
| silesia.tar.s2                 | 1.52x            | 811.58 MB/s   |
Even though S2 typically compresses better than Snappy, decompression speed is always better.
@ -450,14 +450,14 @@ The most reliable is a wide dataset.
For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
| *                 | Input      | Output     | Reduction  | MB/s       |
|-------------------|------------|------------|------------|------------|
| S2                | 4014735833 | 1059723369 | 73.60%     | **936.73** |
| S2 Better         | 4014735833 | 961580539  | 76.05%     | 451.10     |
| S2 Best           | 4014735833 | 899182886  | **77.60%** | 46.84      |
| Snappy            | 4014735833 | 1128706759 | 71.89%     | 790.15     |
| S2, Snappy Output | 4014735833 | 1093823291 | 72.75%     | 936.60     |
| LZ4               | 4014735833 | 1063768713 | 73.50%     | 452.02     |
S2 delivers both the best single-threaded throughput with regular mode and the best compression rate with "best".
"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
@ -489,43 +489,24 @@ AMD64 assembly is used for both S2 and Snappy.
| Absolute Perf         | Snappy size | S2 Size | Snappy Speed | S2 Speed    | Snappy dec  | S2 dec      |
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
| html                  | 22843       | 20868   | 16246 MB/s   | 18617 MB/s  | 40972 MB/s  | 49263 MB/s  |
| urls.10K              | 335492      | 286541  | 7943 MB/s    | 10201 MB/s  | 22523 MB/s  | 26484 MB/s  |
| fireworks.jpeg        | 123034      | 123100  | 349544 MB/s  | 303228 MB/s | 718321 MB/s | 827552 MB/s |
| fireworks.jpeg (200B) | 146         | 155     | 8869 MB/s    | 20180 MB/s  | 33691 MB/s  | 52421 MB/s  |
| paper-100k.pdf        | 85304       | 84202   | 167546 MB/s  | 112988 MB/s | 326905 MB/s | 291944 MB/s |
| html_x_4              | 92234       | 20870   | 15194 MB/s   | 54457 MB/s  | 30843 MB/s  | 32217 MB/s  |
| alice29.txt           | 88034       | 85934   | 5936 MB/s    | 6540 MB/s   | 12882 MB/s  | 20044 MB/s  |
| asyoulik.txt          | 77503       | 79575   | 5517 MB/s    | 6657 MB/s   | 12735 MB/s  | 22806 MB/s  |
| lcet10.txt            | 234661      | 220383  | 6235 MB/s    | 6303 MB/s   | 14519 MB/s  | 18697 MB/s  |
| plrabn12.txt          | 319267      | 318196  | 5159 MB/s    | 6074 MB/s   | 11923 MB/s  | 19901 MB/s  |
| geo.protodata         | 23335       | 18606   | 21220 MB/s   | 25432 MB/s  | 56271 MB/s  | 62540 MB/s  |
| kppkn.gtb             | 69526       | 65019   | 9732 MB/s    | 8905 MB/s   | 18491 MB/s  | 18969 MB/s  |
| alice29.txt (128B)    | 80          | 82      | 6691 MB/s    | 17179 MB/s  | 31883 MB/s  | 38874 MB/s  |
| alice29.txt (1000B)   | 774         | 774     | 12204 MB/s   | 13273 MB/s  | 48056 MB/s  | 52341 MB/s  |
| alice29.txt (10000B)  | 6648        | 6933    | 10044 MB/s   | 12824 MB/s  | 32378 MB/s  | 46322 MB/s  |
| alice29.txt (20000B)  | 12686       | 13516   | 7733 MB/s    | 12160 MB/s  | 30566 MB/s  | 58969 MB/s  |
| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
|-----------------------|-------------|------------------|----------|--------------|
| html | 22.31% | 7.58% | 1.07x | 1.20x |
| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
Decompression speed is better than Snappy, except in one case.
@ -543,43 +524,24 @@ So individual benchmarks should only be seen as a guideline and the overall pict
| Absolute Perf         | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec  | Better dec  |
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
| html                  | 22843       | 18972       | 16246 MB/s   | 8621 MB/s    | 40972 MB/s  | 40292 MB/s  |
| urls.10K              | 335492      | 248079      | 7943 MB/s    | 5104 MB/s    | 22523 MB/s  | 20981 MB/s  |
| fireworks.jpeg        | 123034      | 123100      | 349544 MB/s  | 84429 MB/s   | 718321 MB/s | 823698 MB/s |
| fireworks.jpeg (200B) | 146         | 149         | 8869 MB/s    | 7125 MB/s    | 33691 MB/s  | 30101 MB/s  |
| paper-100k.pdf        | 85304       | 82887       | 167546 MB/s  | 11087 MB/s   | 326905 MB/s | 198869 MB/s |
| html_x_4              | 92234       | 18982       | 15194 MB/s   | 29316 MB/s   | 30843 MB/s  | 30937 MB/s  |
| alice29.txt           | 88034       | 71611       | 5936 MB/s    | 3709 MB/s    | 12882 MB/s  | 16611 MB/s  |
| asyoulik.txt          | 77503       | 65941       | 5517 MB/s    | 3380 MB/s    | 12735 MB/s  | 14975 MB/s  |
| lcet10.txt            | 234661      | 184939      | 6235 MB/s    | 3537 MB/s    | 14519 MB/s  | 16634 MB/s  |
| plrabn12.txt          | 319267      | 264990      | 5159 MB/s    | 2960 MB/s    | 11923 MB/s  | 13382 MB/s  |
| geo.protodata         | 23335       | 17689       | 21220 MB/s   | 10859 MB/s   | 56271 MB/s  | 57961 MB/s  |
| kppkn.gtb             | 69526       | 55398       | 9732 MB/s    | 5206 MB/s    | 18491 MB/s  | 16524 MB/s  |
| alice29.txt (128B)    | 80          | 78          | 6691 MB/s    | 7422 MB/s    | 31883 MB/s  | 34225 MB/s  |
| alice29.txt (1000B)   | 774         | 746         | 12204 MB/s   | 5734 MB/s    | 48056 MB/s  | 42068 MB/s  |
| alice29.txt (10000B)  | 6648        | 6218        | 10044 MB/s   | 6055 MB/s    | 32378 MB/s  | 28813 MB/s  |
| alice29.txt (20000B)  | 12686       | 11492       | 7733 MB/s    | 3143 MB/s    | 30566 MB/s  | 27315 MB/s  |
| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
|-----------------------|-------------|-------------|--------------|------------|
| html | 22.31% | 13.18% | 0.48x | 0.98x |
| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
Except for the mostly incompressible JPEG image, compression is better and usually in the
double digits in terms of percentage reduction over Snappy.
@ -605,29 +567,29 @@ Some examples compared on 16 core CPU, amd64 assembly used:
```
* enwik10
Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s

* github-june-2days-2019.json
Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s

* nyc-taxi-data-10M.csv
Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s

* 10gb.tar
Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s

* consensus.db.10gb
Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
```
Decompression speed should be around the same as using the 'better' compression mode.
@ -648,10 +610,10 @@ If you would like more control, you can use the s2 package as described below:
Snappy compatible blocks can be generated with the S2 encoder.
Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, for lower memory usage. Replace:
| Snappy                    | S2 replacement        |
|---------------------------|-----------------------|
| snappy.Encode(...)        | s2.EncodeSnappy(...)  |
| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
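As a usage sketch of the drop-in replacement (the API calls are the documented `s2` package functions; the sample payload is invented):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("Hello Gophers! Hello Gophers! Hello Gophers!")

	// Direct replacement for snappy.Encode; output stays Snappy-compatible.
	dst := make([]byte, 0, s2.MaxEncodedLen(len(src)))
	encoded := s2.EncodeSnappy(dst, src)

	// Better trades encode speed for smaller, still Snappy-compatible output.
	better := s2.EncodeSnappyBetter(nil, src)

	fmt.Println(len(src), "->", len(encoded), "(default) /", len(better), "(better)")
}
```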
@ -660,12 +622,12 @@ Compression and speed is typically a bit better `MaxEncodedLen` is also smaller
Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
| Encoder               | Size       | MB/s       | Reduction  |
|-----------------------|------------|------------|------------|
| snappy.Encode         | 1128706759 | 725.59     | 71.89%     |
| s2.EncodeSnappy       | 1093823291 | **899.16** | 72.75%     |
| s2.EncodeSnappyBetter | 1001158548 | 578.49     | 75.06%     |
| s2.EncodeSnappyBest   | 944507998  | 66.00      | **76.47%** |
## Streams
@ -851,20 +813,20 @@ The block can be read from the front, but contains information so it can be read
Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
with an un-encoded value length of 64 bits, unless other limits are specified.
| Content                              | Format                                                                                                                          |
|--------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|
| ID, `[1]byte`                        | Always 0x99.                                                                                                                    |
| Data Length, `[3]byte`               | 3 byte little-endian length of the chunk in bytes, following this.                                                              |
| Header `[6]byte`                     | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00".                                                          |
| UncompressedSize, Varint             | Total Uncompressed size.                                                                                                        |
| CompressedSize, Varint               | Total Compressed size if known. Should be -1 if unknown.                                                                        |
| EstBlockSize, Varint                 | Block Size, used for guessing uncompressed offsets. Must be >= 0.                                                               |
| Entries, Varint                      | Number of Entries in index, must be < 65536 and >=0.                                                                            |
| HasUncompressedOffsets `byte`        | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid.                                               |
| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode.                                                                                  |
| CompressedOffsets, [Entries]VarInt   | Compressed offsets. See below how to decode.                                                                                    |
| Block Size, `[4]byte`                | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block.        |
| Trailer `[6]byte`                    | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream.   |
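These varints follow the same zigzag convention as Go's `encoding/binary` signed varints, which makes the "-1 if unknown" sentinel for `CompressedSize` cheap to store. A quick sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Zigzag maps -1 to 1, so the "unknown" sentinel still
	// fits in a single byte on the wire.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(buf, -1)
	fmt.Println(buf[:n]) // [1]

	v, _ := binary.Varint(buf[:n])
	fmt.Println(v) // -1
}
```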
For regular streams the uncompressed offsets are fully predictable,
so `HasUncompressedOffsets` allows specifying that compressed blocks all have


@ -35,6 +35,7 @@ type Decoder struct {
br readerWrapper br readerWrapper
enabled bool enabled bool
inFrame bool inFrame bool
dstBuf []byte
} }
frame *frameDec frame *frameDec
@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
} }
// If bytes buffer and < 5MB, do sync decoding anyway. // If bytes buffer and < 5MB, do sync decoding anyway.
if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb bb2 := bb
if debugDecoder { if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
} }
b := bb2.Bytes() b := bb2.Bytes()
var dst []byte var dst []byte
if cap(d.current.b) > 0 { if cap(d.syncStream.dstBuf) > 0 {
dst = d.current.b dst = d.syncStream.dstBuf[:0]
} }
dst, err := d.DecodeAll(b, dst[:0]) dst, err := d.DecodeAll(b, dst)
if err == nil { if err == nil {
err = io.EOF err = io.EOF
} }
// Save output buffer
d.syncStream.dstBuf = dst
d.current.b = dst d.current.b = dst
d.current.err = err d.current.err = err
d.current.flushed = true d.current.flushed = true
@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.err = nil d.current.err = nil
d.current.flushed = false d.current.flushed = false
d.current.d = nil d.current.d = nil
d.syncStream.dstBuf = nil
// Ensure no-one else is still running... // Ensure no-one else is still running...
d.streamWg.Wait() d.streamWg.Wait()
@ -680,6 +684,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder { if debugDecoder {
println("Async 1: new history, recent:", block.async.newHist.recentOffsets) println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
} }
hist.reset()
hist.decoders = block.async.newHist.decoders hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize hist.windowSize = block.async.newHist.windowSize
@ -711,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
seqExecute <- block seqExecute <- block
} }
close(seqExecute) close(seqExecute)
hist.reset()
}() }()
var wg sync.WaitGroup var wg sync.WaitGroup
@ -734,6 +740,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder { if debugDecoder {
println("Async 2: new history") println("Async 2: new history")
} }
hist.reset()
hist.windowSize = block.async.newHist.windowSize hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
if block.async.newHist.dict != nil { if block.async.newHist.dict != nil {
@ -815,13 +822,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder { if debugDecoder {
println("decoder goroutines finished") println("decoder goroutines finished")
} }
hist.reset()
}() }()
var hist history
decodeStream: decodeStream:
for { for {
var hist history
var hasErr bool var hasErr bool
hist.reset()
decodeBlock := func(block *blockDec) { decodeBlock := func(block *blockDec) {
if hasErr { if hasErr {
if block != nil { if block != nil {
@ -937,5 +945,6 @@ decodeStream:
} }
close(seqDecode) close(seqDecode)
wg.Wait() wg.Wait()
hist.reset()
d.frame.history.b = frameHistCache d.frame.history.b = frameHistCache
} }


@ -14,21 +14,23 @@ type DOption func(*decoderOptions) error
// options retains accumulated state of multiple options. // options retains accumulated state of multiple options.
type decoderOptions struct { type decoderOptions struct {
lowMem bool lowMem bool
concurrent int concurrent int
maxDecodedSize uint64 maxDecodedSize uint64
maxWindowSize uint64 maxWindowSize uint64
dicts []dict dicts []dict
ignoreChecksum bool ignoreChecksum bool
limitToCap bool limitToCap bool
decodeBufsBelow int
} }
func (o *decoderOptions) setDefault() { func (o *decoderOptions) setDefault() {
*o = decoderOptions{ *o = decoderOptions{
// use less ram: true for now, but may change. // use less ram: true for now, but may change.
lowMem: true, lowMem: true,
concurrent: runtime.GOMAXPROCS(0), concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize, maxWindowSize: MaxWindowSize,
decodeBufsBelow: 128 << 10,
} }
if o.concurrent > 4 { if o.concurrent > 4 {
o.concurrent = 4 o.concurrent = 4
@ -126,6 +128,18 @@ func WithDecodeAllCapLimit(b bool) DOption {
} }
} }
// WithDecodeBuffersBelow will fully decode readers that have a
// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer
// when their content is below the given size.
// This typically uses fewer allocations but keeps the full decompressed object in memory.
// Note that DecodeAllCapLimit will disable this, as will setting a size of 0 or less.
// Default is 128KiB.
func WithDecodeBuffersBelow(size int) DOption {
return func(o *decoderOptions) error {
o.decodeBufsBelow = size
return nil
}
}
// IgnoreChecksum allows to forcibly ignore checksum checking. // IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption { func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error { return func(o *decoderOptions) error {

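A hedged usage sketch of the new option; WithDecodeBuffersBelow is the API added above, while the 256 KiB threshold and the surrounding code are illustrative assumptions:

package main

import (
	"bytes"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Inputs that expose Bytes()/Len() (e.g. *bytes.Buffer) and are smaller than
	// the threshold are decoded in one pass; larger inputs keep the streaming path.
	dec, err := zstd.NewReader(nil, zstd.WithDecodeBuffersBelow(256<<10))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var compressed bytes.Buffer // assume this holds a zstd frame
	if err := dec.Reset(&compressed); err != nil {
		panic(err)
	}
	_, _ = io.Copy(io.Discard, dec)
}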
vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -32,6 +32,7 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
+	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }

 const highScore = 25000
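A quick check of the padding arithmetic in the new comment. The struct is unexported, so this sketch mirrors it; the two fields above `length` are assumed from upstream:

package main

import (
	"fmt"
	"unsafe"
)

// Five int32 fields (20 bytes) plus 12 bytes of padding.
type match struct {
	offset int32
	s      int32
	length int32
	rep    int32
	est    int32
	_      [12]byte
}

func main() {
	fmt.Println(unsafe.Sizeof(match{})) // 32: exactly two matches per 64-byte cache line
}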
vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -343,7 +343,7 @@ func (d *frameDec) consumeCRC() error {
 	return nil
 }

-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
@@ -369,7 +369,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 		if debugDecoder {
 			println("maxSyncLen:", d.history.decoders.maxSyncLen)
 		}
-		if !d.o.limitToCap && uint64(cap(dst)-len(dst)) < d.history.decoders.maxSyncLen {
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
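The corrected condition compares total capacity, not remaining headroom, against maxSyncLen before reallocating. A minimal sketch of the grow-while-preserving-contents pattern used in that branch (helper name assumed):

package main

import "fmt"

// growTo returns dst with at least n total capacity, keeping its contents,
// mirroring the allocation branch in runDecoder.
func growTo(dst []byte, n int) []byte {
	if cap(dst) >= n {
		return dst
	}
	dst2 := make([]byte, len(dst), n)
	copy(dst2, dst)
	return dst2
}

func main() {
	b := append(make([]byte, 0, 4), 'a', 'b')
	b = growTo(b, 64)
	fmt.Println(len(b), cap(b)) // 2 64
}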
vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -21,7 +21,8 @@ type buildDtableAsmContext struct {
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int

 // please keep in sync with _generate/gen_fse.go
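This one-character fix matters: compiler directives are only recognized as `//go:name` with no space after the slashes, so `// go:noescape` was an ordinary comment and the pragma was silently ignored. A runnable illustration, using //go:noinline since it needs no assembly:

package main

import "fmt"

//go:noinline
func add(a, b int) int { return a + b } // recognized directive: add is never inlined

// go:noinline   <- the space makes this a plain comment with no effect

func main() {
	fmt.Println(add(1, 2))
}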
vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,26 +37,23 @@ func (h *history) reset() {
 	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
-	if h.huffTree != nil {
-		if h.dict == nil || h.dict.litEnc != h.huffTree {
-			huffDecoderPool.Put(h.huffTree)
-		}
-	}
+	h.freeHuffDecoder()
 	h.huffTree = nil
 	h.dict = nil
 	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }

+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
 func (h *history) setDict(dict *dict) {
 	if dict == nil {
 		return
vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
 	return nil
 }

+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			if size-startSize == 424242 {
+				panic("here")
+			}
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 	// Check if space for literals
 	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}

 	// Add final literals
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -139,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

 	default:
 		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
@@ -147,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -289,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		}
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		litRemain -= ll
 		if litRemain < 0 {
@@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
vendor/google.golang.org/api/internal/gensupport/media.go
@@ -289,13 +289,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB
 		// be retried because the data is stored in the MediaBuffer.
 		media, _, _, _ = mi.buffer.Chunk()
 	}
+	toCleanup := []io.Closer{}
 	if media != nil {
 		fb := readerFunc(body)
 		fm := readerFunc(media)
 		combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
-		toCleanup := []io.Closer{
-			combined,
-		}
+		toCleanup = append(toCleanup, combined)
 		if fb != nil && fm != nil {
 			getBody = func() (io.ReadCloser, error) {
 				rb := ioutil.NopCloser(fb())
@@ -309,18 +308,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB
 				return r, nil
 			}
 		}
-		cleanup = func() {
-			for _, closer := range toCleanup {
-				_ = closer.Close()
-			}
-		}
 		reqHeaders.Set("Content-Type", ctype)
 		body = combined
 	}
 	if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
+		// This happens when initiating a resumable upload session.
+		// The initial request contains a JSON body rather than media.
+		// It can be retried with a getBody function that re-creates the request body.
+		fb := readerFunc(body)
+		if fb != nil {
+			getBody = func() (io.ReadCloser, error) {
+				rb := ioutil.NopCloser(fb())
+				toCleanup = append(toCleanup, rb)
+				return rb, nil
+			}
+		}
 		reqHeaders.Set("X-Upload-Content-Type", mi.mType)
 	}
+	// Ensure that any bodies created in getBody are cleaned up.
+	cleanup = func() {
+		for _, closer := range toCleanup {
+			_ = closer.Close()
+		}
+	}
 	return body, getBody, cleanup
 }
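For context, a self-contained sketch of the retry-safe body pattern above: every attempt gets a fresh reader from getBody, and every body created along the way is tracked and closed afterwards. Names and the sample payload are assumptions, not the library's API:

package main

import (
	"bytes"
	"io"
	"net/http"
)

func main() {
	payload := []byte(`{"name":"example"}`) // stand-in request body
	var toCleanup []io.Closer

	req, _ := http.NewRequest("POST", "https://example.invalid/upload", bytes.NewReader(payload))
	// Re-create the body for each retry instead of reusing a half-consumed reader,
	// and remember each created body so it can be closed later.
	req.GetBody = func() (io.ReadCloser, error) {
		rb := io.NopCloser(bytes.NewReader(payload))
		toCleanup = append(toCleanup, rb)
		return rb, nil
	}

	// Simulate a retry: a second attempt asks for a fresh body.
	if rb, err := req.GetBody(); err == nil {
		_ = rb
	}

	// After all attempts, close everything getBody created.
	for _, c := range toCleanup {
		_ = c.Close()
	}
}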
vendor/google.golang.org/api/internal/gensupport/send.go
@@ -17,6 +17,27 @@ import (
 	"github.com/googleapis/gax-go/v2"
 )

+// Use this error type to return an error which allows introspection of both
+// the context error and the error from the service.
+type wrappedCallErr struct {
+	ctxErr     error
+	wrappedErr error
+}
+
+func (e wrappedCallErr) Error() string {
+	return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
+}
+
+func (e wrappedCallErr) Unwrap() error {
+	return e.wrappedErr
+}
+
+// Is allows errors.Is to match the error from the call as well as context
+// sentinel errors.
+func (e wrappedCallErr) Is(target error) bool {
+	return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target)
+}
+
 // SendRequest sends a single HTTP request using the given client.
 // If ctx is non-nil, it calls all hooks, then sends the request with
 // req.WithContext, then calls any functions returned by the hooks in
@@ -96,12 +117,12 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r
 	for {
 		select {
 		case <-ctx.Done():
-			// If we got an error, and the context has been canceled,
-			// the context's error is probably more useful.
-			if err == nil {
-				err = ctx.Err()
+			// If we got an error and the context has been canceled, return an error acknowledging
+			// both the context cancelation and the service error.
+			if err != nil {
+				return resp, wrappedCallErr{ctx.Err(), err}
 			}
-			return resp, err
+			return resp, ctx.Err()
 		case <-time.After(pause):
 		}
@@ -110,10 +131,10 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r
 		// select is satisfied at the same time, Go will choose one arbitrarily.
 		// That can cause an operation to go through even if the context was
 		// canceled before.
-		if err == nil {
-			err = ctx.Err()
+		if err != nil {
+			return resp, wrappedCallErr{ctx.Err(), err}
 		}
-		return resp, err
+		return resp, ctx.Err()
 	}
 	invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts)
 	xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ")
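The effect of wrappedCallErr is that callers can match either the context sentinel or the underlying service error with errors.Is. A runnable demonstration; the type is unexported, so it is re-declared here exactly as in the diff, and the service error is a stand-in:

package main

import (
	"context"
	"errors"
	"fmt"
)

type wrappedCallErr struct {
	ctxErr     error
	wrappedErr error
}

func (e wrappedCallErr) Error() string {
	return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
}
func (e wrappedCallErr) Unwrap() error { return e.wrappedErr }
func (e wrappedCallErr) Is(target error) bool {
	return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target)
}

var errService = errors.New("service: 503 backend unavailable")

func main() {
	err := wrappedCallErr{ctxErr: context.DeadlineExceeded, wrappedErr: errService}
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: context sentinel matches
	fmt.Println(errors.Is(err, errService))               // true: service error matches too
}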
vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "0.96.0"
+const Version = "0.97.0"
vendor/modules.txt
@@ -10,7 +10,7 @@ cloud.google.com/go/compute/metadata
 # cloud.google.com/go/iam v0.4.0
 ## explicit; go 1.17
 cloud.google.com/go/iam
-# cloud.google.com/go/storage v1.26.0
+# cloud.google.com/go/storage v1.27.0
 ## explicit; go 1.17
 cloud.google.com/go/storage
 cloud.google.com/go/storage/internal
@@ -34,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.2.0
 ## explicit; go 1.12
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.44.102
+# github.com/aws/aws-sdk-go v1.44.105
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -157,7 +157,7 @@ github.com/influxdata/influxdb/pkg/escape
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.15.10
+# github.com/klauspost/compress v1.15.11
 ## explicit; go 1.17
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
@@ -179,8 +179,8 @@ github.com/mattn/go-isatty
 # github.com/mattn/go-runewidth v0.0.14
 ## explicit; go 1.9
 github.com/mattn/go-runewidth
-# github.com/matttproud/golang_protobuf_extensions v1.0.1
-## explicit
+# github.com/matttproud/golang_protobuf_extensions v1.0.2
+## explicit; go 1.9
 github.com/matttproud/golang_protobuf_extensions/pbutil
 # github.com/oklog/ulid v1.3.1
 ## explicit
@@ -280,7 +280,7 @@ go.opencensus.io/trace/tracestate
 go.uber.org/atomic
 # go.uber.org/goleak v1.1.11-0.20210813005559-691160354723
 ## explicit; go 1.13
-# golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9
+# golang.org/x/net v0.0.0-20220923203811-8be639271d50
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -302,7 +302,7 @@ golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.0.0-20220907140024-f12130a52804
+# golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7
 ## explicit
 golang.org/x/sync/errgroup
 # golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
@@ -320,7 +320,7 @@ golang.org/x/text/unicode/norm
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.96.0
+# google.golang.org/api v0.97.0
 ## explicit; go 1.15
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -353,7 +353,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006
+# google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1