vendor: update github.com/klauspost/compress from v1.13.1 to v1.13.4

Aliaksandr Valialkin 2021-08-12 12:38:41 +03:00
parent 95dd5a48bb
commit 556c1b36e5
25 changed files with 1083 additions and 202 deletions

go.mod (2 changed lines)

@@ -20,7 +20,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.3
-github.com/klauspost/compress v1.13.1
+github.com/klauspost/compress v1.13.4
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/prometheus/common v0.29.0 // indirect
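For reference, a bump like this is normally reproduced with the standard Go module commands, e.g. `go get github.com/klauspost/compress@v1.13.4` followed by `go mod tidy` and `go mod vendor`; the exact workflow used in this repository may differ.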

go.sum (4 changed lines)

@@ -622,8 +622,8 @@ github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4 h1:0zhec2I8zGnjWcKyLl6i3gPqKANCCn5e9xmviEEeX6s=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=

vendor/github.com/klauspost/compress/.gitattributes (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
* -text
*.bin -text -diff

vendor/github.com/klauspost/compress/.gitignore (new vendored file, 25 lines)

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe

vendor/github.com/klauspost/compress/.goreleaser.yml (new vendored file, 137 lines)

@@ -0,0 +1,137 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
before:
  hooks:
    - ./gen.sh
builds:
  -
    id: "s2c"
    binary: s2c
    main: ./s2/cmd/s2c/main.go
    flags:
      - -trimpath
    env:
      - CGO_ENABLED=0
    goos:
      - aix
      - linux
      - freebsd
      - netbsd
      - windows
      - darwin
    goarch:
      - 386
      - amd64
      - arm
      - arm64
      - ppc64
      - ppc64le
      - mips64
      - mips64le
    goarm:
      - 7
  -
    id: "s2d"
    binary: s2d
    main: ./s2/cmd/s2d/main.go
    flags:
      - -trimpath
    env:
      - CGO_ENABLED=0
    goos:
      - aix
      - linux
      - freebsd
      - netbsd
      - windows
      - darwin
    goarch:
      - 386
      - amd64
      - arm
      - arm64
      - ppc64
      - ppc64le
      - mips64
      - mips64le
    goarm:
      - 7
  -
    id: "s2sx"
    binary: s2sx
    main: ./s2/cmd/_s2sx/main.go
    flags:
      - -modfile=s2sx.mod
      - -trimpath
    env:
      - CGO_ENABLED=0
    goos:
      - aix
      - linux
      - freebsd
      - netbsd
      - windows
      - darwin
    goarch:
      - 386
      - amd64
      - arm
      - arm64
      - ppc64
      - ppc64le
      - mips64
      - mips64le
    goarm:
      - 7
archives:
  -
    id: s2-binaries
    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
    replacements:
      aix: AIX
      darwin: OSX
      linux: Linux
      windows: Windows
      386: i386
      amd64: x86_64
      freebsd: FreeBSD
      netbsd: NetBSD
    format_overrides:
      - goos: windows
        format: zip
    files:
      - unpack/*
      - s2/LICENSE
      - s2/README.md
checksum:
  name_template: 'checksums.txt'
snapshot:
  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - '^doc:'
      - '^docs:'
      - '^test:'
      - '^tests:'
      - '^Update\sREADME.md'
nfpms:
  -
    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
    vendor: Klaus Post
    homepage: https://github.com/klauspost/compress
    maintainer: Klaus Post <klauspost@gmail.com>
    description: S2 Compression Tool
    license: BSD 3-Clause
    formats:
      - deb
      - rpm
    replacements:
      darwin: Darwin
      linux: Linux
      freebsd: FreeBSD
      amd64: x86_64

vendor/github.com/klauspost/compress/README.md (new vendored file, 426 lines)

@@ -0,0 +1,426 @@
# compress
This package provides various compression algorithms.
* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# changelog
* Aug 3, 2021 (v1.13.3)
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
* Jun 14, 2021 (v1.13.1)
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
* s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
* Jun 3, 2021 (v1.13.0)
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
* Apr 27, 2021 (v1.12.2)
* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358)
* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
* Apr 14, 2021 (v1.12.1)
* snappy package removed. Upstream added as dependency.
* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
* Mar 5, 2021 (v1.11.12)
* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
* Mar 1, 2021 (v1.11.9)
* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
* Jan 14, 2021 (v1.11.7)
* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
* Jan 7, 2021 (v1.11.6)
* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
* Dec 20, 2020 (v1.11.4)
* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
* Nov 15, 2020 (v1.11.3)
* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
* Oct 11, 2020 (v1.11.2)
* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
* Oct 1, 2020 (v1.11.1)
* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
* Sept 8, 2020 (v1.11.0)
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
<details>
<summary>See changes prior to v1.11.0</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
* June 23, 2020 (v1.10.10)
* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
* June 16, 2020 (v1.10.9):
* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
* Fuzzit tests removed. The service has been purchased and is no longer available.
* June 5, 2020 (v1.10.8):
* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
* June 1, 2020 (v1.10.7):
* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
* May 21, 2020: (v1.10.6)
* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
* zstd: Stricter decompression checks.
* April 12, 2020: (v1.10.5)
* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
* Apr 8, 2020: (v1.10.4)
* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
* Mar 11, 2020: (v1.10.3)
* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
* Feb 27, 2020: (v1.10.2)
* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
* Feb 18, 2020: (v1.10.1)
* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
* Feb 4, 2020: (v1.10.0)
* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
</details>
<details>
<summary>See changes prior to v1.10.0</summary>
* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
* Nov 11, 2019: Reduce inflate memory use by 1KB.
* Nov 10, 2019: Less allocations in deflate bit writer.
* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
</details>
<details>
<summary>See changes prior to v1.9.0</summary>
* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
* June 17, 2019: zstd decompression bugfix.
* June 17, 2019: fix 32 bit builds.
* June 17, 2019: Easier use in modules (less dependencies).
* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
* May 28, 2017: Reduce allocations when resetting decoder.
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
* Mar 24, 2016: Small speedup for level 1-3.
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
</details>
# deflate usage
* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
|--------------------|-----------------------------------------|--------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
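A minimal sketch of the drop-in swap (only the import path differs from the standard library; the sample data is illustrative):
```
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer

	// Compress with the same API as the standard library.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello hello hello hello")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed size:", buf.Len())

	// Decompress it again.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip:", string(out))
}
```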
Currently there is only minor speedup on decompression (mostly CRC32 calculation).
Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
It will do compression but without maintaining any state between Write calls.
This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
This is only relevant in cases where you expect to run many thousands of compressors concurrently,
but with very little activity. This is *not* intended for regular web servers serving individual requests.
Because of this, the size of actual Write calls will affect output size.
In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
```
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
return err
}
defer gzw.Close()
w := bufio.NewWriterSize(gzw, 4096)
defer w.Flush()
// Write to 'w'
```
This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
# Performance Update 2018
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
## Overall differences.
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" gives the best compression, and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
## Web Content
This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
Since levels 1 and 9 use nearly the same code in both packages, their results are quite close. But looking at the levels in between, the differences are quite big.
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
## Object files
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
## Huffman only compression
This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data and only reducing the number of bits used to represent each character.
This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
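A minimal sketch of Huffman-only compression through the flate package, assuming the `HuffmanOnly` level constant it exports (sample data is illustrative):
```
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	payload := bytes.Repeat([]byte("abcdefgh"), 1000)

	var buf bytes.Buffer
	// HuffmanOnly skips match searching entirely; only entropy coding is applied.
	fw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		panic(err)
	}
	if _, err := fw.Write(payload); err != nil {
		panic(err)
	}
	if err := fw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes\n", len(payload), buf.Len())
}
```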
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.

vendor/github.com/klauspost/compress/compressible.go (new vendored file, 85 lines)

@@ -0,0 +1,85 @@
package compress
import "math"
// Estimate returns a normalized compressibility estimate of block b.
// Values close to zero are likely uncompressible.
// Values above 0.1 are likely to be compressible.
// Values above 0.5 are very compressible.
// Very small lengths will return 0.
func Estimate(b []byte) float64 {
if len(b) < 16 {
return 0
}
// Correctly predicted order 1
hits := 0
lastMatch := false
var o1 [256]byte
var hist [256]int
c1 := byte(0)
for _, c := range b {
if c == o1[c1] {
// We only count a hit if there was two correct predictions in a row.
if lastMatch {
hits++
}
lastMatch = true
} else {
lastMatch = false
}
o1[c1] = c
c1 = c
hist[c]++
}
// Use x^0.6 to give better spread
prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
// Calculate histogram distribution
variance := float64(0)
avg := float64(len(b)) / 256
for _, v := range hist {
Δ := float64(v) - avg
variance += Δ * Δ
}
stddev := math.Sqrt(float64(variance)) / float64(len(b))
exp := math.Sqrt(1 / float64(len(b)))
// Subtract expected stddev
stddev -= exp
if stddev < 0 {
stddev = 0
}
stddev *= 1 + exp
// Use x^0.4 to give better spread
entropy := math.Pow(stddev, 0.4)
// 50/50 weight between prediction and histogram distribution
return math.Pow((prediction+entropy)/2, 0.9)
}
// ShannonEntropyBits returns the number of bits minimum required to represent
// an entropy encoding of the input bytes.
// https://en.wiktionary.org/wiki/Shannon_entropy
func ShannonEntropyBits(b []byte) int {
if len(b) == 0 {
return 0
}
var hist [256]int
for _, c := range b {
hist[c]++
}
shannon := float64(0)
invTotal := 1.0 / float64(len(b))
for _, v := range hist[:] {
if v > 0 {
n := float64(v)
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
}
}
return int(math.Ceil(shannon))
}
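A hedged usage sketch of the two helpers added above; the import path comes from the vendored go.mod and the sample data is illustrative:
```
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

func main() {
	data := bytes.Repeat([]byte("VictoriaMetrics "), 1024)

	// Normalized compressibility estimate: values close to zero are likely
	// uncompressible, values above 0.1 are likely compressible.
	fmt.Printf("estimate: %.3f\n", compress.Estimate(data))

	// Minimum number of bits an entropy coder would need for this input.
	fmt.Printf("entropy bits: %d\n", compress.ShannonEntropyBits(data))
}
```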

vendor/github.com/klauspost/compress/gen.sh (new vendored file, 4 lines)

@@ -0,0 +1,4 @@
#!/bin/sh
cd s2/cmd/_s2sx/ || exit 1
go generate .

vendor/github.com/klauspost/compress/go.mod (new vendored file, 5 lines)

@@ -0,0 +1,5 @@
module github.com/klauspost/compress
go 1.13
require github.com/golang/snappy v0.0.3 // indirect

vendor/github.com/klauspost/compress/go.sum (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=


@@ -161,6 +161,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
return s.Out, false, nil
}
// EstimateSizes will estimate the data sizes
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
s, err = s.prepare(in)
if err != nil {
return 0, 0, 0, err
}
// Create histogram, if none was provided.
tableSz, dataSz, reuseSz = -1, -1, -1
maxCount := s.maxCount
var canReuse = false
if maxCount == 0 {
maxCount, canReuse = s.countSimple(in)
} else {
canReuse = s.canUseTable(s.prevTable)
}
// We want the output size to be less than this:
wantSize := len(in)
if s.WantLogLess > 0 {
wantSize -= wantSize >> s.WantLogLess
}
// Reset for next run.
s.clearCount = true
s.maxCount = 0
if maxCount >= len(in) {
if maxCount > len(in) {
return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
}
if len(in) == 1 {
return 0, 0, 0, ErrIncompressible
}
// One symbol, use RLE
return 0, 0, 0, ErrUseRLE
}
if maxCount == 1 || maxCount < (len(in)>>7) {
// Each symbol present maximum once or too well distributed.
return 0, 0, 0, ErrIncompressible
}
// Calculate new table.
err = s.buildCTable()
if err != nil {
return 0, 0, 0, err
}
if false && !s.canUseTable(s.cTable) {
panic("invalid table generated")
}
tableSz, err = s.cTable.estTableSize(s)
if err != nil {
return 0, 0, 0, err
}
if canReuse {
reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
}
dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
// Restore
return tableSz, dataSz, reuseSz, nil
}
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
return s.compress1xDo(s.Out, src)
}
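A hedged usage sketch of the new `EstimateSizes` helper, based only on the signature in the hunk above (input data and error handling are illustrative):
```
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("entropy coding example "), 512)

	var s huff0.Scratch
	tableSz, dataSz, reuseSz, err := huff0.EstimateSizes(in, &s)
	if err != nil {
		// ErrIncompressible / ErrUseRLE are returned for degenerate inputs.
		panic(err)
	}
	fmt.Printf("table: %d bytes, data: %d bytes, reuse: %d bytes\n", tableSz, dataSz, reuseSz)
}
```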


@@ -245,6 +245,68 @@ func (c cTable) write(s *Scratch) error {
return nil
}
func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
var (
// precomputed conversion table
bitsToWeight [tableLogMax + 1]byte
huffLog = s.actualTableLog
// last weight is not saved.
maxSymbolValue = uint8(s.symbolLen - 1)
huffWeight = s.huffWeight[:256]
)
const (
maxFSETableLog = 6
)
// convert to weight
bitsToWeight[0] = 0
for n := uint8(1); n < huffLog+1; n++ {
bitsToWeight[n] = huffLog + 1 - n
}
// Acquire histogram for FSE.
hist := s.fse.Histogram()
hist = hist[:256]
for i := range hist[:16] {
hist[i] = 0
}
for n := uint8(0); n < maxSymbolValue; n++ {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++
}
// FSE compress if feasible.
if maxSymbolValue >= 2 {
huffMaxCnt := uint32(0)
huffMax := uint8(0)
for i, v := range hist[:16] {
if v == 0 {
continue
}
huffMax = byte(i)
if v > huffMaxCnt {
huffMaxCnt = v
}
}
s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
s.fse.TableLog = maxFSETableLog
b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
if err == nil && len(b) < int(s.symbolLen>>1) {
sz += 1 + len(b)
return sz, nil
}
// Unable to compress (RLE/uncompressible)
}
// write raw values as 4-bits (max : 15)
if maxSymbolValue > (256 - 128) {
// should not happen : likely means source cannot be compressed
return 0, ErrIncompressible
}
// special case, pack weights 4 bits/weight.
sz += 1 + int(maxSymbolValue/2)
return sz, nil
}
// estimateSize returns the estimated size in bytes of the input represented in the
// histogram supplied.
func (c cTable) estimateSize(hist []uint32) int {

vendor/github.com/klauspost/compress/s2sx.mod (new vendored file, 5 lines)

@@ -0,0 +1,5 @@
module github.com/klauspost/compress
go 1.16
require github.com/golang/snappy v0.0.3 // indirect

vendor/github.com/klauspost/compress/s2sx.sum (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=


@@ -152,7 +152,7 @@ file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87
silesia.tar zskp 2 211947520 67504318 969 208.38
silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 7691 26.28
+silesia.tar zskp 4 211947520 60995370 8825 22.90
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -162,7 +162,7 @@ silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80369488 1168 173.06
+silesia.tar gzkp 1 211947520 80136201 1152 175.45
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
@@ -171,13 +171,15 @@ file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30
gob-stream zskp 2 1911399616 205669791 3786 481.34
gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 167273881 29337 62.13
+gob-stream zskp 4 1911399616 165609838 50369 36.19
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 362156523 5695 320.08
+gob-stream gzkp 1 1911399616 359753026 5438 335.20
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
@@ -187,11 +189,13 @@ file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18
enwik9 zskp 2 1000000000 317276632 5746 165.97
enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 275241169 36430 26.18
+enwik9 zskp 4 1000000000 262183768 82837 11.51
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
enwik9 gzstd 1 1000000000 382578136 9604 99.30
enwik9 gzkp 1 1000000000 383825945 6544 145.73
@@ -202,13 +206,15 @@ file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
+github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
@@ -217,13 +223,15 @@ file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
+rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
@@ -232,13 +240,15 @@ file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
+nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
```
## Decompressor


@@ -260,9 +260,10 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if len(d.current.b) > 0 {
n2, err2 := w.Write(d.current.b)
n += int64(n2)
-if err2 != nil && d.current.err == nil {
+if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
d.current.err = err2
-break
+} else if n2 != len(d.current.b) {
+d.current.err = io.ErrShortWrite
}
}
if d.current.err != nil {
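For context, a minimal sketch of the `Decoder.WriteTo` calling pattern this fix affects (the file name is illustrative):
```
package main

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

// decompressToStdout streams a zstd-compressed file to stdout via
// Decoder.WriteTo, the code path touched by the fix above.
func decompressToStdout(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	d, err := zstd.NewReader(f)
	if err != nil {
		return err
	}
	defer d.Close()

	_, err = d.WriteTo(os.Stdout)
	return err
}

func main() {
	if err := decompressToStdout("input.zst"); err != nil {
		panic(err)
	}
}
```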


@@ -38,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
// WindowSize returns the window size of the encoder,
// or a window size small enough to contain the input size, if > 0.
-func (e *fastBase) WindowSize(size int) int32 {
-if size > 0 && size < int(e.maxMatchOff) {
+func (e *fastBase) WindowSize(size int64) int32 {
+if size > 0 && size < int64(e.maxMatchOff) {
b := int32(1) << uint(bits.Len(uint(size)))
// Keep minimum window.
if b < 1024 {


@@ -5,22 +5,61 @@
package zstd
import (
+"bytes"
"fmt"
-"math/bits"
+"github.com/klauspost/compress"
)
const (
-bestLongTableBits = 20 // Bits used in the long match table
+bestLongTableBits = 22 // Bits used in the long match table
bestLongTableSize = 1 << bestLongTableBits // Size of the table
+bestLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the
// long match table and match offsets are quite big.
// This greatly depends on the type of input.
-bestShortTableBits = 16 // Bits used in the short match table
+bestShortTableBits = 18 // Bits used in the short match table
bestShortTableSize = 1 << bestShortTableBits // Size of the table
+bestShortLen = 4 // Bytes used for table hash
)
type match struct {
offset int32
s int32
length int32
rep int32
est int32
}
const highScore = 25000
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
mlc := mlCode(uint32(m.length - zstdMinMatch))
var ofc uint8
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
ofc = ofCode(uint32(m.rep))
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
// Add cost of match encoding...
m.est = int32(ofTT.outBits + mlTT.outBits)
m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
// Subtract savings compared to literal encoding...
m.est -= (m.length * bitsPerByte) >> 10
if m.est > 0 {
// Unlikely gain..
m.length = 0
m.est = highScore
}
}
// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. // bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash, // The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2. // effectively making it a "chain" of length 2.
@ -109,6 +148,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
return return
} }
// Use this to estimate literal cost.
// Scaled by 10 bits.
bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
// Huffman can never go < 1 bit/byte
if bitsPerByte < 1024 {
bitsPerByte = 1024
}
// Override src // Override src
src = e.hist src = e.hist
sLimit := int32(len(src)) - inputMargin sLimit := int32(len(src)) - inputMargin
@ -145,51 +192,49 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
type match struct {
offset int32
s int32
length int32
rep int32
}
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{offset: offset, s: s}
}
return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
}
bestOf := func(a, b match) match { bestOf := func(a, b match) match {
aScore := b.s - a.s + a.length if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
bScore := a.s - b.s + b.length
if a.rep < 0 {
aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
}
if b.rep < 0 {
bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
}
if aScore >= bScore {
return a return a
} }
return b return b
} }
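The rewritten `bestOf` compares estimated cost rather than raw length: each candidate is additionally charged the literal cost of the bytes between the two starting positions (`(a.s-b.s)*bitsPerByte>>10`), so a match that starts later pays for the literals it leaves uncovered. For example, with `bitsPerByte` ≈ 4096, a candidate starting two bytes after the other is handicapped by roughly 8 bits before the estimates are compared.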
const goodEnough = 100 const goodEnough = 100
nextHashL := hash8(cv, bestLongTableBits) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hash4x64(cv, bestShortTableBits) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{s: s, est: highScore}
}
if debugAsserts {
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
m.estBits(bitsPerByte)
return m
}
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
if canRepeat && best.length < goodEnough { if canRepeat && best.length < goodEnough {
best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) cv32 := uint32(cv >> 8)
best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) spp := s + 1
best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
if best.length > 0 { if best.length > 0 {
best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) cv32 = uint32(cv >> 24)
best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) spp += 2
best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
} }
} }
// Load next and check... // Load next and check...
@ -209,22 +254,28 @@ encodeLoop:
} }
s++ s++
candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s) cv = load6432(src, s)
cv2 := load6432(src, s+1) cv2 := load6432(src, s+1)
candidateL = e.longTable[hash8(cv, bestLongTableBits)] candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
// Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
if false {
// Short at s+3.
// Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
}
// See if we can find a better match by checking where the current best ends. // See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match. // Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit { if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hash8(load6432(src, sAt), bestLongTableBits) nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL] candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
@ -236,6 +287,12 @@ encodeLoop:
} }
} }
if debugAsserts {
if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
}
}
// We have a match, we can store the forward value // We have a match, we can store the forward value
if best.rep > 0 { if best.rep > 0 {
s = best.s s = best.s
@ -284,8 +341,8 @@ encodeLoop:
off := index0 + e.cur off := index0 + e.cur
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
h0 := hash8(cv0, bestLongTableBits) h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hash4x64(cv0, bestShortTableBits) h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
off++ off++
@ -311,7 +368,7 @@ encodeLoop:
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
} }
if debugAsserts && canRepeat && int(offset1) > len(src) { if debugAsserts && int(offset1) > len(src) {
panic("invalid offset") panic("invalid offset")
} }
@ -352,8 +409,8 @@ encodeLoop:
// every entry // every entry
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
h0 := hash8(cv0, bestLongTableBits) h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hash4x64(cv0, bestShortTableBits) h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
@ -374,8 +431,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash4x64(cv, bestShortTableBits) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
nextHashL := hash8(cv, bestLongTableBits) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -425,7 +482,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.Encode(blk, src) e.Encode(blk, src)
} }
// ResetDict will reset and set a dictionary if not nil // Reset will reset and set a dictionary if not nil
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock) e.resetBase(d, singleBlock)
if d == nil { if d == nil {
@ -441,10 +498,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
const hashLog = bestShortTableBits const hashLog = bestShortTableBits
cv := load6432(d.content, i-e.maxMatchOff) cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash4x64(cv, hashLog) // 0 -> 4 nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4
nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5
nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
e.dictTable[nextHash] = prevEntry{ e.dictTable[nextHash] = prevEntry{
prev: e.dictTable[nextHash].offset, prev: e.dictTable[nextHash].offset,
offset: i, offset: i,
@ -472,7 +529,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
} }
if len(d.content) >= 8 { if len(d.content) >= 8 {
cv := load6432(d.content, 0) cv := load6432(d.content, 0)
h := hash8(cv, bestLongTableBits) h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{ e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff, offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset, prev: e.dictLongTable[h].offset,
@ -482,7 +539,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
off := 8 // First to read off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ { for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56) cv = cv>>8 | (uint64(d.content[off]) << 56)
h := hash8(cv, bestLongTableBits) h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{ e.dictLongTable[h] = prevEntry{
offset: i, offset: i,
prev: e.dictLongTable[h].offset, prev: e.dictLongTable[h].offset,

View file

@ -9,6 +9,7 @@ import "fmt"
const ( const (
betterLongTableBits = 19 // Bits used in the long match table betterLongTableBits = 19 // Bits used in the long match table
betterLongTableSize = 1 << betterLongTableBits // Size of the table betterLongTableSize = 1 << betterLongTableBits // Size of the table
betterLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter // Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the // can actually lead to compression degradation since it will 'steal' more from the
@ -16,6 +17,7 @@ const (
// This greatly depends on the type of input. // This greatly depends on the type of input.
betterShortTableBits = 13 // Bits used in the short match table betterShortTableBits = 13 // Bits used in the short match table
betterShortTableSize = 1 << betterShortTableBits // Size of the table betterShortTableSize = 1 << betterShortTableBits // Size of the table
betterShortLen = 5 // Bytes used for table hash
betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
@ -154,8 +156,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hash5(cv, betterShortTableBits) nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hash8(cv, betterLongTableBits) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -214,10 +216,10 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2 index0 += 2
} }
cv = load6432(src, s) cv = load6432(src, s)
@ -275,10 +277,10 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2 index0 += 2
} }
cv = load6432(src, s) cv = load6432(src, s)
@ -353,7 +355,7 @@ encodeLoop:
// See if we can find a long match at s+1 // See if we can find a long match at s+1
const checkAt = 1 const checkAt = 1
cv := load6432(src, s+checkAt) cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, betterLongTableBits) nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL] candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur coffsetL = candidateL.offset - e.cur
@ -413,8 +415,8 @@ encodeLoop:
} }
// Try to find a better match by searching for a long match at the end of the current best match // Try to find a better match by searching for a long match at the end of the current best match
if true && s+matched < sLimit { if s+matched < sLimit {
nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s) cv := load3232(src, s)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched coffsetL := candidateL.offset - e.cur - matched
@ -495,10 +497,10 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2 index0 += 2
} }
@ -516,8 +518,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash5(cv, betterShortTableBits) nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hash8(cv, betterLongTableBits) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -672,8 +674,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hash5(cv, betterShortTableBits) nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hash8(cv, betterLongTableBits) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -734,11 +736,11 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0) e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits) h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1) e.markShortShardDirty(h1)
index0 += 2 index0 += 2
@ -798,11 +800,11 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0) e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits) h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1) e.markShortShardDirty(h1)
index0 += 2 index0 += 2
@ -879,7 +881,7 @@ encodeLoop:
// See if we can find a long match at s+1 // See if we can find a long match at s+1
const checkAt = 1 const checkAt = 1
cv := load6432(src, s+checkAt) cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, betterLongTableBits) nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL] candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur coffsetL = candidateL.offset - e.cur
@ -940,7 +942,7 @@ encodeLoop:
} }
// Try to find a better match by searching for a long match at the end of the current best match // Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit { if s+matched < sLimit {
nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s) cv := load3232(src, s)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched coffsetL := candidateL.offset - e.cur - matched
@ -1021,11 +1023,11 @@ encodeLoop:
for index0 < s-1 { for index0 < s-1 {
cv0 := load6432(src, index0) cv0 := load6432(src, index0)
cv1 := cv0 >> 8 cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits) h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0) e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits) h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1) e.markShortShardDirty(h1)
index0 += 2 index0 += 2
@ -1045,8 +1047,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash5(cv, betterShortTableBits) nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hash8(cv, betterLongTableBits) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -1113,10 +1115,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = betterShortTableBits const hashLog = betterShortTableBits
cv := load6432(d.content, i-e.maxMatchOff) cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash5(cv, hashLog) // 0 -> 4 nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
e.dictTable[nextHash] = tableEntry{ e.dictTable[nextHash] = tableEntry{
val: uint32(cv), val: uint32(cv),
offset: i, offset: i,
@ -1145,7 +1147,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
} }
if len(d.content) >= 8 { if len(d.content) >= 8 {
cv := load6432(d.content, 0) cv := load6432(d.content, 0)
h := hash8(cv, betterLongTableBits) h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{ e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff, offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset, prev: e.dictLongTable[h].offset,
@ -1155,7 +1157,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
off := 8 // First to read off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ { for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56) cv = cv>>8 | (uint64(d.content[off]) << 56)
h := hash8(cv, betterLongTableBits) h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{ e.dictLongTable[h] = prevEntry{
offset: i, offset: i,
prev: e.dictLongTable[h].offset, prev: e.dictLongTable[h].offset,

View file

@ -10,6 +10,7 @@ const (
dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableBits = 17 // Bits used in the long match table
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
@ -17,6 +18,8 @@ const (
dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
dFastShortLen = 5 // Bytes used for table hash
) )
type doubleFastEncoder struct { type doubleFastEncoder struct {
@ -124,8 +127,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hash5(cv, dFastShortTableBits) nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -208,7 +211,7 @@ encodeLoop:
// See if we can find a long match at s+1 // See if we can find a long match at s+1
const checkAt = 1 const checkAt = 1
cv := load6432(src, s+checkAt) cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits) nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL] candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -304,16 +307,16 @@ encodeLoop:
cv1 := load6432(src, index1) cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
e.longTable[hash8(cv0, dFastLongTableBits)] = te0 e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
e.longTable[hash8(cv1, dFastLongTableBits)] = te1 e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8 cv0 >>= 8
cv1 >>= 8 cv1 >>= 8
te0.offset++ te0.offset++
te1.offset++ te1.offset++
te0.val = uint32(cv0) te0.val = uint32(cv0)
te1.val = uint32(cv1) te1.val = uint32(cv1)
e.table[hash5(cv0, dFastShortTableBits)] = te0 e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
e.table[hash5(cv1, dFastShortTableBits)] = te1 e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s) cv = load6432(src, s)
@ -330,8 +333,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash5(cv, dFastShortTableBits) nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -436,8 +439,8 @@ encodeLoop:
var t int32 var t int32
for { for {
nextHashS := hash5(cv, dFastShortTableBits) nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -521,7 +524,7 @@ encodeLoop:
// See if we can find a long match at s+1 // See if we can find a long match at s+1
const checkAt = 1 const checkAt = 1
cv := load6432(src, s+checkAt) cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits) nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL] candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -614,16 +617,16 @@ encodeLoop:
cv1 := load6432(src, index1) cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
e.longTable[hash8(cv0, dFastLongTableBits)] = te0 e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
e.longTable[hash8(cv1, dFastLongTableBits)] = te1 e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8 cv0 >>= 8
cv1 >>= 8 cv1 >>= 8
te0.offset++ te0.offset++
te1.offset++ te1.offset++
te0.val = uint32(cv0) te0.val = uint32(cv0)
te1.val = uint32(cv1) te1.val = uint32(cv1)
e.table[hash5(cv0, dFastShortTableBits)] = te0 e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
e.table[hash5(cv1, dFastShortTableBits)] = te1 e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s) cv = load6432(src, s)
@ -640,8 +643,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash5(cv1>>8, dFastShortTableBits) nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -782,8 +785,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHashS := hash5(cv, dFastShortTableBits) nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL] candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS] candidateS := e.table[nextHashS]
@ -868,7 +871,7 @@ encodeLoop:
// See if we can find a long match at s+1 // See if we can find a long match at s+1
const checkAt = 1 const checkAt = 1
cv := load6432(src, s+checkAt) cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits) nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL] candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -965,8 +968,8 @@ encodeLoop:
cv1 := load6432(src, index1) cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hash8(cv0, dFastLongTableBits) longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hash8(cv0, dFastLongTableBits) longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0 e.longTable[longHash1] = te0
e.longTable[longHash2] = te1 e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1) e.markLongShardDirty(longHash1)
@ -977,8 +980,8 @@ encodeLoop:
te1.offset++ te1.offset++
te0.val = uint32(cv0) te0.val = uint32(cv0)
te1.val = uint32(cv1) te1.val = uint32(cv1)
hashVal1 := hash5(cv0, dFastShortTableBits) hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
hashVal2 := hash5(cv1, dFastShortTableBits) hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
e.table[hashVal1] = te0 e.table[hashVal1] = te0
e.markShardDirty(hashVal1) e.markShardDirty(hashVal1)
e.table[hashVal2] = te1 e.table[hashVal2] = te1
@ -999,8 +1002,8 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHashS := hash5(cv, dFastShortTableBits) nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hash8(cv, dFastLongTableBits) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match. // We have at least 4 byte match.
// No need to check backwards. We come straight from a match // No need to check backwards. We come straight from a match
@ -1071,14 +1074,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
} }
if len(d.content) >= 8 { if len(d.content) >= 8 {
cv := load6432(d.content, 0) cv := load6432(d.content, 0)
e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv), val: uint32(cv),
offset: e.maxMatchOff, offset: e.maxMatchOff,
} }
end := int32(len(d.content)) - 8 + e.maxMatchOff end := int32(len(d.content)) - 8 + e.maxMatchOff
for i := e.maxMatchOff + 1; i < end; i++ { for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv), val: uint32(cv),
offset: i, offset: i,
} }

View file

@ -11,12 +11,13 @@ import (
) )
const ( const (
tableBits = 15 // Bits used in the table tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table tableSize = 1 << tableBits // Size of the table
tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
tableShardSize = tableSize / tableShardCnt // Size of an individual shard tableShardSize = tableSize / tableShardCnt // Size of an individual shard
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. tableFastHashLen = 6
maxMatchLength = 131074 tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
maxMatchLength = 131074
) )
type tableEntry struct { type tableEntry struct {
@ -122,8 +123,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hash6(cv>>8, hashLog) nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash] candidate := e.table[nextHash]
candidate2 := e.table[nextHash2] candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2 repIndex := s - offset1 + 2
@ -301,7 +302,7 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0 seq.litLen = 0
@ -405,8 +406,8 @@ encodeLoop:
// By not using them for the first 3 matches // By not using them for the first 3 matches
for { for {
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hash6(cv>>8, hashLog) nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash] candidate := e.table[nextHash]
candidate2 := e.table[nextHash2] candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2 repIndex := s - offset1 + 2
@ -589,7 +590,7 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0 seq.litLen = 0
@ -715,8 +716,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hash6(cv>>8, hashLog) nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash] candidate := e.table[nextHash]
candidate2 := e.table[nextHash2] candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2 repIndex := s - offset1 + 2
@ -896,7 +897,7 @@ encodeLoop:
} }
// Store this, since we have it. // Store this, since we have it.
nextHash := hash6(cv, hashLog) nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShardDirty(nextHash) e.markShardDirty(nextHash)
seq.matchLen = uint32(l) - zstdMinMatch seq.matchLen = uint32(l) - zstdMinMatch
@ -957,9 +958,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = tableBits const hashLog = tableBits
cv := load6432(d.content, i-e.maxMatchOff) cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash6(cv, hashLog) // 0 -> 5 nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
e.dictTable[nextHash] = tableEntry{ e.dictTable[nextHash] = tableEntry{
val: uint32(cv), val: uint32(cv),
offset: i, offset: i,

View file

@ -33,7 +33,7 @@ type encoder interface {
Block() *blockEnc Block() *blockEnc
CRC() *xxhash.Digest CRC() *xxhash.Digest
AppendCRC([]byte) []byte AppendCRC([]byte) []byte
WindowSize(size int) int32 WindowSize(size int64) int32
UseBlock(*blockEnc) UseBlock(*blockEnc)
Reset(d *dict, singleBlock bool) Reset(d *dict, singleBlock bool)
} }
@ -48,6 +48,8 @@ type encoderState struct {
err error err error
writeErr error writeErr error
nWritten int64 nWritten int64
nInput int64
frameContentSize int64
headerWritten bool headerWritten bool
eofWritten bool eofWritten bool
fullFrameWritten bool fullFrameWritten bool
@ -120,7 +122,21 @@ func (e *Encoder) Reset(w io.Writer) {
s.w = w s.w = w
s.err = nil s.err = nil
s.nWritten = 0 s.nWritten = 0
s.nInput = 0
s.writeErr = nil s.writeErr = nil
s.frameContentSize = 0
}
// ResetContentSize will reset and set a content size for the next stream.
// If the bytes written does not match the size given an error will be returned
// when calling Close().
// This is removed when Reset is called.
// Sizes <= 0 results in no content size set.
func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
e.Reset(w)
if size >= 0 {
e.state.frameContentSize = size
}
} }
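`ResetContentSize` lets a streaming `Encoder` declare the payload size up front, so the frame header carries a real `ContentSize` and the window size can be derived from it (see the `frameHeader` hunk below); `Close` then fails if the number of bytes actually written disagrees. A hedged usage sketch (writer and data names are illustrative):

```
// compressKnownSize writes data as a single zstd stream whose frame header
// declares the content size up front.
package zstdexample

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

func compressKnownSize(w io.Writer, data []byte) error {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		return err
	}
	enc.ResetContentSize(w, int64(len(data)))
	if _, err := enc.Write(data); err != nil {
		enc.Close()
		return err
	}
	// Close reports an error if the bytes written do not match the size given.
	return enc.Close()
}
```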
// Write data to the encoder. // Write data to the encoder.
@ -190,6 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err return s.err
} }
s.nWritten += int64(n2) s.nWritten += int64(n2)
s.nInput += int64(len(s.filling))
s.current = s.current[:0] s.current = s.current[:0]
s.filling = s.filling[:0] s.filling = s.filling[:0]
s.headerWritten = true s.headerWritten = true
@ -200,8 +217,8 @@ func (e *Encoder) nextBlock(final bool) error {
var tmp [maxHeaderSize]byte var tmp [maxHeaderSize]byte
fh := frameHeader{ fh := frameHeader{
ContentSize: 0, ContentSize: uint64(s.frameContentSize),
WindowSize: uint32(s.encoder.WindowSize(0)), WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)),
SingleSegment: false, SingleSegment: false,
Checksum: e.o.crc, Checksum: e.o.crc,
DictID: e.o.dict.ID(), DictID: e.o.dict.ID(),
@ -243,6 +260,7 @@ func (e *Encoder) nextBlock(final bool) error {
// Move blocks forward. // Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1) s.wg.Add(1)
go func(src []byte) { go func(src []byte) {
if debugEncoder { if debugEncoder {
@ -394,6 +412,11 @@ func (e *Encoder) Close() error {
if err != nil { if err != nil {
return err return err
} }
if s.frameContentSize > 0 {
if s.nInput != s.frameContentSize {
return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
}
}
if e.state.fullFrameWritten { if e.state.fullFrameWritten {
return s.err return s.err
} }
@ -470,7 +493,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
} }
fh := frameHeader{ fh := frameHeader{
ContentSize: uint64(len(src)), ContentSize: uint64(len(src)),
WindowSize: uint32(enc.WindowSize(len(src))), WindowSize: uint32(enc.WindowSize(int64(len(src)))),
SingleSegment: single, SingleSegment: single,
Checksum: e.o.crc, Checksum: e.o.crc,
DictID: e.o.dict.ID(), DictID: e.o.dict.ID(),

View file

@ -189,7 +189,7 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
case level >= 6 && level < 10: case level >= 6 && level < 10:
return SpeedBetterCompression return SpeedBetterCompression
case level >= 10: case level >= 10:
return SpeedBetterCompression return SpeedBestCompression
} }
return SpeedDefault return SpeedDefault
} }
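This one-line fix means zstd levels 10 and above now map to `SpeedBestCompression`; previously they fell through to `SpeedBetterCompression`. A small sketch of how the mapping is typically consumed (assuming the usual `WithEncoderLevel` option, which is not part of this diff):

```
// newEncoderForLevel builds an encoder from a zstd CLI-style level.
package zstdexample

import "github.com/klauspost/compress/zstd"

func newEncoderForLevel(level int) (*zstd.Encoder, error) {
	// With the fix above, EncoderLevelFromZstd(10) and higher now return
	// zstd.SpeedBestCompression.
	return zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
}
```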

View file

@ -13,24 +13,24 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463 prime8bytes = 0xcf1bbcdcb7a56463
) )
// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. // hashLen returns a hash of the lowest mls bytes of with length output bits.
// l must be >=4 and <=8. Any other value will return hash for 4 bytes. // mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// h should always be <32. // length should always be < 32.
// Preferably h and l should be a constant. // Preferably length and mls should be a constant for inlining.
// FIXME: This does NOT get resolved, if 'mls' is constant, func hashLen(u uint64, length, mls uint8) uint32 {
// so this cannot be used.
func hashLen(u uint64, hashLog, mls uint8) uint32 {
switch mls { switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5: case 5:
return hash5(u, hashLog) return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6: case 6:
return hash6(u, hashLog) return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7: case 7:
return hash7(u, hashLog) return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8: case 8:
return hash8(u, hashLog) return uint32((u * prime8bytes) >> (64 - length))
default: default:
return hash4x64(u, hashLog) return (uint32(u) * prime4bytes) >> (32 - length)
} }
} }
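The separate per-length helpers (`hash4x64`, `hash5` … `hash8`, removed below) are folded into `hashLen`, and the old FIXME about the switch not resolving for constant `mls` is gone. Each case multiplies the low `mls` bytes by a length-specific prime and keeps the top `length` bits of the product. A toy run of the 8-byte case with a made-up input value:

```
// Illustrative only: the mls == 8 branch of hashLen.
package main

import "fmt"

const prime8bytes = 0xcf1bbcdcb7a56463

func main() {
	const tableBits = 20 // output bits, i.e. an index into a 1<<20 entry table
	u := uint64(0x0123456789abcdef)
	h := uint32((u * prime8bytes) >> (64 - tableBits)) // top 20 bits of the product
	fmt.Printf("%#x\n", h)
}
```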
@ -39,39 +39,3 @@ func hashLen(u uint64, hashLog, mls uint8) uint32 {
func hash3(u uint32, h uint8) uint32 { func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
} }
// hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint32, h uint8) uint32 {
return (u * prime4bytes) >> ((32 - h) & 31)
}
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
return uint32((u * prime8bytes) >> ((64 - h) & 63))
}

3
vendor/modules.txt vendored
View file

@ -129,8 +129,9 @@ github.com/jmespath/go-jmespath
github.com/jstemmer/go-junit-report github.com/jstemmer/go-junit-report
github.com/jstemmer/go-junit-report/formatter github.com/jstemmer/go-junit-report/formatter
github.com/jstemmer/go-junit-report/parser github.com/jstemmer/go-junit-report/parser
# github.com/klauspost/compress v1.13.1 # github.com/klauspost/compress v1.13.4
## explicit ## explicit
github.com/klauspost/compress
github.com/klauspost/compress/flate github.com/klauspost/compress/flate
github.com/klauspost/compress/fse github.com/klauspost/compress/fse
github.com/klauspost/compress/gzip github.com/klauspost/compress/gzip