vendor: update github.com/klauspost/compress from v1.12.1 to v1.12.2
parent 4e5a88114a
commit 25b8d71df5

9 changed files with 260 additions and 21 deletions
go.mod (2 changes)

@@ -16,7 +16,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/snappy v0.0.3
 	github.com/influxdata/influxdb v1.8.5
-	github.com/klauspost/compress v1.12.1
+	github.com/klauspost/compress v1.12.2
 	github.com/prometheus/client_golang v1.10.0 // indirect
 	github.com/prometheus/common v0.21.0 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
go.sum (4 changes)

@@ -517,8 +517,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.1 h1:/+xsCsk06wE38cyiqOR/o7U2fSftcH72xD+BQXmja/g=
-github.com/klauspost/compress v1.12.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
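The go.mod, go.sum and vendor/ changes are the usual footprint of bumping a vendored dependency. A commit like this is typically produced with commands along these lines (run from the repository root; shown as a sketch, not taken from the commit itself):

    go get github.com/klauspost/compress@v1.12.2
    go mod vendor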
vendor/github.com/klauspost/compress/flate/level5.go (generated, vendored; 17 changes)

@@ -182,12 +182,27 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
 		// them as literal bytes.
 
-		// Extend the 4-byte match as long as possible.
 		if l == 0 {
+			// Extend the 4-byte match as long as possible.
 			l = e.matchlenLong(s+4, t+4, src) + 4
 		} else if l == maxMatchLength {
 			l += e.matchlenLong(s+l, t+l, src)
 		}
+
+		// Try to locate a better match by checking the end of best match...
+		if sAt := s + l; l < 30 && sAt < sLimit {
+			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+			// Test current
+			t2 := eLong - e.cur - l
+			off := s - t2
+			if t2 >= 0 && off < maxMatchOffset && off > 0 {
+				if l2 := e.matchlenLong(s, t2, src); l2 > l {
+					t = t2
+					l = l2
+				}
+			}
+		}
+
 		// Extend backwards
 		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
 			s--
vendor/github.com/klauspost/compress/flate/level6.go (generated, vendored; 25 changes)

@@ -211,6 +211,31 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 			l += e.matchlenLong(s+l, t+l, src)
 		}
 
+		// Try to locate a better match by checking the end-of-match...
+		if sAt := s + l; sAt < sLimit {
+			eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+			// Test current
+			t2 := eLong.Cur.offset - e.cur - l
+			off := s - t2
+			if off < maxMatchOffset {
+				if off > 0 && t2 >= 0 {
+					if l2 := e.matchlenLong(s, t2, src); l2 > l {
+						t = t2
+						l = l2
+					}
+				}
+				// Test next:
+				t2 = eLong.Prev.offset - e.cur - l
+				off := s - t2
+				if off > 0 && off < maxMatchOffset && t2 >= 0 {
+					if l2 := e.matchlenLong(s, t2, src); l2 > l {
+						t = t2
+						l = l2
+					}
+				}
+			}
+		}
+
 		// Extend backwards
 		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
 			s--
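Both flate hunks above add the same "end-of-match" heuristic: once a match of length l starting at s has been found, hash the bytes just past its end (s+l), look that hash up in the long-match table, and shift the recorded candidate back by l so its end would line up with s+l; if matching from s against that shifted candidate is longer than the current match, keep it. Below is a toy, self-contained sketch of the idea; probeEndOfMatch, matchLen and the lastSeen map are simplified stand-ins for the encoder's internals (hash7, bTable, matchlenLong), not the library's API.

package main

import "fmt"

// matchLen counts how many bytes src[a:] and src[b:] have in common.
func matchLen(src []byte, a, b int) int {
	n := 0
	for a+n < len(src) && b+n < len(src) && src[a+n] == src[b+n] {
		n++
	}
	return n
}

// probeEndOfMatch sketches the heuristic: we already matched l bytes at s
// against an earlier position t. Look up where the 4 bytes at the *end* of
// the match (s+l) were last seen, shift that candidate back by l so its end
// lines up with s+l, and keep it if it yields a longer match starting at s.
func probeEndOfMatch(src []byte, lastSeen map[string]int, s, t, l int) (int, int) {
	sAt := s + l
	if sAt+4 > len(src) {
		return t, l
	}
	prev, ok := lastSeen[string(src[sAt:sAt+4])]
	if !ok {
		return t, l
	}
	if t2 := prev - l; t2 >= 0 && t2 < s {
		if l2 := matchLen(src, s, t2); l2 > l {
			return t2, l2
		}
	}
	return t, l
}

func main() {
	src := []byte("abcdefghij__abcde____abcdefghij")
	// Suppose the encoder already found a 5-byte match: src[21:26] == src[12:17].
	s, t, l := 21, 12, 5
	// lastSeen records where each 4-byte sequence before s was most recently
	// observed (a stand-in for the encoder's long-match hash table).
	lastSeen := map[string]int{}
	for i := 0; i+4 <= s; i++ {
		lastSeen[string(src[i:i+4])] = i
	}
	t, l = probeEndOfMatch(src, lastSeen, s, t, l)
	fmt.Println(t, l) // prints "0 10": the probe upgrades the match to the longer one at position 0
}

The zstd hunks further down (enc_best.go, enc_better.go) apply the same idea to the zstd encoders.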
vendor/github.com/klauspost/compress/zstd/README.md (generated, vendored; 24 changes)

@@ -152,8 +152,8 @@ This package:
 file out level insize outsize millis mb/s
 silesia.tar zskp 1 211947520 73101992 643 313.87
 silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 65177448 1899 106.44
-silesia.tar zskp 4 211947520 61381950 8115 24.91
+silesia.tar zskp 3 211947520 64595893 2007 100.68
+silesia.tar zskp 4 211947520 60995370 7691 26.28
 
 cgo zstd:
 silesia.tar zstd 1 211947520 73605392 543 371.56

@@ -171,8 +171,8 @@ https://files.klauspost.com/compress/gob-stream.7z
 file out level insize outsize millis mb/s
 gob-stream zskp 1 1911399616 235022249 3088 590.30
 gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 185792019 9324 195.48
-gob-stream zskp 4 1911399616 171537212 32113 56.76
+gob-stream zskp 3 1911399616 175034659 9636 189.17
+gob-stream zskp 4 1911399616 167273881 29337 62.13
 gob-stream zstd 1 1911399616 249810424 2637 691.26
 gob-stream zstd 3 1911399616 208192146 3490 522.31
 gob-stream zstd 6 1911399616 193632038 6687 272.56

@@ -187,8 +187,8 @@ http://mattmahoney.net/dc/textdata.html
 file out level insize outsize millis mb/s
 enwik9 zskp 1 1000000000 343848582 3609 264.18
 enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 294540704 11725 81.34
-enwik9 zskp 4 1000000000 276609671 44029 21.66
+enwik9 zskp 3 1000000000 292243069 12162 78.41
+enwik9 zskp 4 1000000000 275241169 36430 26.18
 enwik9 zstd 1 1000000000 358072021 3110 306.65
 enwik9 zstd 3 1000000000 313734672 4784 199.35
 enwik9 zstd 6 1000000000 295138875 10290 92.68

@@ -202,8 +202,8 @@ https://files.klauspost.com/compress/github-june-2days-2019.json.zst
 file out level insize outsize millis mb/s
 github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
 github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54
-github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18
+github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
+github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18

@@ -217,8 +217,8 @@ https://files.klauspost.com/compress/rawstudio-mint14.7z
 file out level insize outsize millis mb/s
 rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
 rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75
-rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79
+rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
+rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77

@@ -232,8 +232,8 @@ https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
 file out level insize outsize millis mb/s
 nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
 nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
-nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49
+nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
+nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
vendor/github.com/klauspost/compress/zstd/enc_best.go (generated, vendored; 14 changes)

@@ -220,6 +220,20 @@ encodeLoop:
 			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
 			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
 			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+
+			// See if we can find a better match by checking where the current best ends.
+			// Use that offset to see if we can find a better full match.
+			if sAt := best.s + best.length; sAt < sLimit {
+				nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
+				candidateEnd := e.longTable[nextHashL]
+				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
+					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
+					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
+						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+					}
+					best = bestEnd
+				}
+			}
 		}
 
 		// We have a match, we can store the forward value
vendor/github.com/klauspost/compress/zstd/enc_better.go (generated, vendored; 73 changes)

@@ -412,8 +412,41 @@ encodeLoop:
 			cv = load6432(src, s)
 		}
 
-		// A 4-byte match has been found. Update recent offsets.
-		// We'll later see if more than 4 bytes.
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if true && s+matched < sLimit {
+			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
+
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
 		offset2 = offset1
 		offset1 = s - t
 

@@ -905,9 +938,41 @@ encodeLoop:
 			}
 			cv = load6432(src, s)
 		}
+		// Try to find a better match by searching for a long match at the end of the current best match
+		if s+matched < sLimit {
+			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+			cv := load3232(src, s)
+			candidateL := e.longTable[nextHashL]
+			coffsetL := candidateL.offset - e.cur - matched
+			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				// Found a long match, at least 4 bytes.
+				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				if matchedNext > matched {
+					t = coffsetL
+					matched = matchedNext
+					if debugMatches {
+						println("long match at end-of-match")
+					}
+				}
+			}
 
-		// A 4-byte match has been found. Update recent offsets.
-		// We'll later see if more than 4 bytes.
+			// Check prev long...
+			if true {
+				coffsetL = candidateL.prev - e.cur - matched
+				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+					// Found a long match, at least 4 bytes.
+					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					if matchedNext > matched {
+						t = coffsetL
+						matched = matchedNext
+						if debugMatches {
+							println("prev long match at end-of-match")
+						}
+					}
+				}
+			}
+		}
+		// A match has been found. Update recent offsets.
 		offset2 = offset1
 		offset1 = s - t
 
vendor/github.com/klauspost/compress/zstd/zip.go (generated, vendored; new file, 120 lines)

@@ -0,0 +1,120 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip.
+// See https://www.winzip.com/win/en/comp_info.html
+const ZipMethodWinZip = 93
+
+// ZipMethodPKWare is the method number used by PKWARE to indicate Zstandard compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.7.TXT
+const ZipMethodPKWare = 20
+
+var zipReaderPool sync.Pool
+
+// newZipReader cannot be used since we would leak goroutines...
+func newZipReader(r io.Reader) io.ReadCloser {
+	dec, ok := zipReaderPool.Get().(*Decoder)
+	if ok {
+		dec.Reset(r)
+	} else {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		dec = d
+	}
+	return &pooledZipReader{dec: dec}
+}
+
+type pooledZipReader struct {
+	mu  sync.Mutex // guards Close and Read
+	dec *Decoder
+}
+
+func (r *pooledZipReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.dec == nil {
+		return 0, errors.New("Read after Close")
+	}
+	dec, err := r.dec.Read(p)
+
+	return dec, err
+}
+
+func (r *pooledZipReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.dec != nil {
+		err = r.dec.Reset(nil)
+		zipReaderPool.Put(r.dec)
+		r.dec = nil
+	}
+	return err
+}
+
+type pooledZipWriter struct {
+	mu  sync.Mutex // guards Close and Read
+	enc *Encoder
+}
+
+func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.enc == nil {
+		return 0, errors.New("Write after Close")
+	}
+	return w.enc.Write(p)
+}
+
+func (w *pooledZipWriter) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	var err error
+	if w.enc != nil {
+		err = w.enc.Close()
+		zipReaderPool.Put(w.enc)
+		w.enc = nil
+	}
+	return err
+}
+
+// ZipCompressor returns a compressor that can be registered with zip libraries.
+// The provided encoder options will be used on all encodes.
+func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
+	var pool sync.Pool
+	return func(w io.Writer) (io.WriteCloser, error) {
+		enc, ok := pool.Get().(*Encoder)
+		if ok {
+			enc.Reset(w)
+		} else {
+			var err error
+			enc, err = NewWriter(w, opts...)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return &pooledZipWriter{enc: enc}, nil
+	}
+}
+
+// ZipDecompressor returns a decompressor that can be registered with zip libraries.
+// See ZipCompressor for example.
+func ZipDecompressor() func(r io.Reader) io.ReadCloser {
+	return func(r io.Reader) io.ReadCloser {
+		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
+		if err != nil {
+			panic(err)
+		}
+		return d.IOReadCloser()
+	}
+}
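The new zip.go wires zstd into Go's standard archive/zip via ZipCompressor, ZipDecompressor and the ZipMethodWinZip/ZipMethodPKWare method IDs. A short usage sketch follows; it is not part of the commit, the file name and payload are made up, and it assumes Go 1.16+ for io.ReadAll.

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write a zip archive whose entry is compressed with zstd (WinZip method 93).
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, zstd-in-zip")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back, registering the matching decompressor.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", data)
}

RegisterCompressor and RegisterDecompressor are standard archive/zip hooks; entries written this way carry method 93, the WinZip Zstandard method referenced in the new file's comments.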
vendor/modules.txt (vendored; 2 changes)

@@ -125,7 +125,7 @@ github.com/jmespath/go-jmespath
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/klauspost/compress v1.12.1
+# github.com/klauspost/compress v1.12.2
 ## explicit
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse