vendor: update github.com/valyala/gozstd from v1.5.1 to v1.6.0

Aliaksandr Valialkin 2019-08-15 12:56:29 +03:00
parent 75a58dee02
commit 5bb61b8b38
14 changed files with 171 additions and 147 deletions

go.mod

@@ -9,7 +9,7 @@ require (
 	github.com/klauspost/compress v1.7.5
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/valyala/fastjson v1.4.1
-	github.com/valyala/gozstd v1.5.1
+	github.com/valyala/gozstd v1.6.0
 	github.com/valyala/histogram v1.0.1
 	github.com/valyala/quicktemplate v1.1.1
 	golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa

go.sum

@@ -41,8 +41,8 @@ github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/y
 github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o=
 github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
 github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
-github.com/valyala/gozstd v1.5.1 h1:ZLepItgu2g+B2CfVQy6KCV/as8lnJ7ef1KU6DPxQSS0=
-github.com/valyala/gozstd v1.5.1/go.mod h1:oYOS+oJovjw9ewtrwEYb9+ybolEXd6pHyLMuAWN5zts=
+github.com/valyala/gozstd v1.6.0 h1:34qKK75C6Dx9zof2JqUiunfJQ87Up6vTHXABWDyCH+g=
+github.com/valyala/gozstd v1.6.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
 github.com/valyala/histogram v1.0.1 h1:FzA7n2Tz/wKRMejgu3PV1vw3htAklTjjuoI6z3d4KDg=
 github.com/valyala/histogram v1.0.1/go.mod h1:lQy0xA4wUz2+IUnf97SivorsJIp8FxsnRd6x25q7Mto=
 github.com/valyala/quicktemplate v1.1.1 h1:C58y/wN0FMTi2PR0n3onltemfFabany53j7M6SDDB8k=

View file

@@ -3,6 +3,7 @@ GOARCH ?= $(shell go env GOARCH)
 GOOS_GOARCH := $(GOOS)_$(GOARCH)
 GOOS_GOARCH_NATIVE := $(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
 LIBZSTD_NAME := libzstd_$(GOOS_GOARCH).a
+ZSTD_VERSION ?= master
 .PHONY: libzstd.a
@@ -10,15 +11,15 @@ libzstd.a: $(LIBZSTD_NAME)
 $(LIBZSTD_NAME):
 ifeq ($(GOOS_GOARCH),$(GOOS_GOARCH_NATIVE))
-	cd zstd/lib && ZSTD_LEGACY_SUPPORT=0 $(MAKE) clean libzstd.a
+	cd zstd/lib && ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a
 	mv zstd/lib/libzstd.a $(LIBZSTD_NAME)
 else
 ifeq ($(GOOS_GOARCH),linux_arm)
-	cd zstd/lib && CC=arm-linux-gnueabi-gcc ZSTD_LEGACY_SUPPORT=0 $(MAKE) clean libzstd.a
+	cd zstd/lib && CC=arm-linux-gnueabi-gcc ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a
 	mv zstd/lib/libzstd.a libzstd_linux_arm.a
 endif
 ifeq ($(GOOS_GOARCH),linux_arm64)
-	cd zstd/lib && CC=aarch64-linux-gnu-gcc ZSTD_LEGACY_SUPPORT=0 $(MAKE) clean libzstd.a
+	cd zstd/lib && CC=aarch64-linux-gnu-gcc ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a
 	mv zstd/lib/libzstd.a libzstd_linux_arm64.a
 endif
 endif
@@ -29,7 +30,7 @@ clean:
 update-zstd:
 	rm -rf zstd-tmp
-	git clone --branch master --depth 1 https://github.com/Facebook/zstd zstd-tmp
+	git clone --branch $(ZSTD_VERSION) --depth 1 https://github.com/Facebook/zstd zstd-tmp
 	rm -rf zstd-tmp/.git
 	rm -rf zstd
 	mv zstd-tmp zstd

View file

@@ -73,5 +73,8 @@ and [Reader](https://godoc.org/github.com/valyala/gozstd#Reader) for stream deco
 * Q: _I don't trust `libzstd*.a` binary files from the repo or these files dont't work on my OS/ARCH. How to rebuild them?_
   A: Just run `make clean libzstd.a` if your OS/ARCH is supported.
+* Q: _How do I specify custom build flags when recompiling `libzstd*.a`?_
+  A: You can specify MOREFLAGS=... variable when running `make` like this: `MOREFLAGS=-fPIC make clean libzstd.a`.
 * Q: _Why the repo contains `libzstd*.a` binary files?_
   A: This simplifies package installation to usual `go get` without additional steps for building the `libzstd*.a`

View file

@@ -1,6 +1,8 @@
 package gozstd
 /*
+#cgo CFLAGS: -O3
+
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
@@ -86,7 +88,10 @@ func BuildDict(samples [][]byte, desiredDictLen int) []byte {
 		&samplesSizes[0],
 		C.unsigned(len(samplesSizes)))
 	buildDictLock.Unlock()
-	ensureNoError("ZDICT_trainFromBuffer", result)
+	if C.ZDICT_isError(result) != 0 {
+		// Return empty dictionary, since the original samples are too small.
+		return nil
+	}
 	dictLen := int(result)
 	return dict[:dictLen]
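
The BuildDict change above replaces the hard failure on ZDICT_trainFromBuffer errors with a nil return when the samples are too small to train a dictionary. A minimal sketch, assuming the public gozstd API (BuildDict, NewCDict, Compress, CompressDict), of how a caller can fall back to dictionary-less compression when that happens:

```go
package main

import (
	"fmt"

	"github.com/valyala/gozstd"
)

// compressWithOptionalDict tries dictionary-based compression and falls back
// to plain compression when dictionary training yields nothing.
func compressWithOptionalDict(samples [][]byte, data []byte) []byte {
	// After this update BuildDict returns nil instead of aborting the process
	// when the samples are insufficient for training.
	dict := gozstd.BuildDict(samples, 16*1024)
	if dict == nil {
		return gozstd.Compress(nil, data)
	}
	cd, err := gozstd.NewCDict(dict)
	if err != nil {
		// Should not normally happen for a freshly built dictionary.
		return gozstd.Compress(nil, data)
	}
	defer cd.Release()
	return gozstd.CompressDict(nil, data, cd)
}

func main() {
	samples := [][]byte{[]byte("tiny"), []byte("samples")}
	fmt.Println(len(compressWithOptionalDict(samples, []byte("payload"))))
}
```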

View file

@@ -1 +1,3 @@
 module github.com/valyala/gozstd
+
+go 1.12

View file

@@ -66,72 +66,55 @@ func CompressDict(dst, src []byte, cd *CDict) []byte {
 }
 func compressDictLevel(dst, src []byte, cd *CDict, compressionLevel int) []byte {
-	compressInitOnce.Do(compressInit)
-	cw := getCompressWork()
-	cw.dst = dst
-	cw.src = src
-	cw.cd = cd
-	cw.compressionLevel = compressionLevel
-	compressWorkCh <- cw
-	<-cw.done
-	dst = cw.dst
-	putCompressWork(cw)
+	concurrencyLimitCh <- struct{}{}
+	var cctx, cctxDict *cctxWrapper
+	if cd == nil {
+		cctx = cctxPool.Get().(*cctxWrapper)
+	} else {
+		cctxDict = cctxDictPool.Get().(*cctxWrapper)
+	}
+	dst = compress(cctx, cctxDict, dst, src, cd, compressionLevel)
+	if cd == nil {
+		cctxPool.Put(cctx)
+	} else {
+		cctxDictPool.Put(cctxDict)
+	}
+	<-concurrencyLimitCh
 	return dst
 }
-func getCompressWork() *compressWork {
-	v := compressWorkPool.Get()
-	if v == nil {
-		v = &compressWork{
-			done: make(chan struct{}),
-		}
-	}
-	return v.(*compressWork)
+var cctxPool = &sync.Pool{
+	New: newCCtx,
 }
-func putCompressWork(cw *compressWork) {
-	cw.src = nil
-	cw.dst = nil
-	cw.cd = nil
-	cw.compressionLevel = 0
-	compressWorkPool.Put(cw)
+var cctxDictPool = &sync.Pool{
+	New: newCCtx,
 }
-type compressWork struct {
-	dst []byte
-	src []byte
-	cd *CDict
-	compressionLevel int
-	done chan struct{}
-}
-var (
-	compressWorkCh chan *compressWork
-	compressWorkPool sync.Pool
-	compressInitOnce sync.Once
-)
-func compressInit() {
-	gomaxprocs := runtime.GOMAXPROCS(-1)
-	compressWorkCh = make(chan *compressWork, gomaxprocs)
-	for i := 0; i < gomaxprocs; i++ {
-		go compressWorker()
-	}
-}
-func compressWorker() {
+func newCCtx() interface{} {
 	cctx := C.ZSTD_createCCtx()
-	cctxDict := C.ZSTD_createCCtx()
-	for cw := range compressWorkCh {
-		cw.dst = compress(cctx, cctxDict, cw.dst, cw.src, cw.cd, cw.compressionLevel)
-		cw.done <- struct{}{}
-	}
+	cw := &cctxWrapper{
+		cctx: cctx,
+	}
+	runtime.SetFinalizer(cw, freeCCtx)
+	return cw
 }
-func compress(cctx, cctxDict *C.ZSTD_CCtx, dst, src []byte, cd *CDict, compressionLevel int) []byte {
+func freeCCtx(cw *cctxWrapper) {
+	C.ZSTD_freeCCtx(cw.cctx)
+	cw.cctx = nil
+}
+type cctxWrapper struct {
+	cctx *C.ZSTD_CCtx
+}
+func compress(cctx, cctxDict *cctxWrapper, dst, src []byte, cd *CDict, compressionLevel int) []byte {
 	if len(src) == 0 {
 		return dst
 	}
@@ -167,9 +150,9 @@ func compress(cctx, cctxDict *C.ZSTD_CCtx, dst, src []byte, cd *CDict, compressi
 	return dst[:dstLen+compressedSize]
 }
-func compressInternal(cctx, cctxDict *C.ZSTD_CCtx, dst, src []byte, cd *CDict, compressionLevel int, mustSucceed bool) C.size_t {
+func compressInternal(cctx, cctxDict *cctxWrapper, dst, src []byte, cd *CDict, compressionLevel int, mustSucceed bool) C.size_t {
 	if cd != nil {
-		result := C.ZSTD_compress_usingCDict_wrapper(cctxDict,
+		result := C.ZSTD_compress_usingCDict_wrapper(cctxDict.cctx,
 			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
 			C.size_t(cap(dst)),
 			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
@@ -183,7 +166,7 @@ func compressInternal(cctx, cctxDict *C.ZSTD_CCtx, dst, src []byte, cd *CDict, c
 		}
 		return result
 	}
-	result := C.ZSTD_compressCCtx_wrapper(cctx,
+	result := C.ZSTD_compressCCtx_wrapper(cctx.cctx,
 		C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
 		C.size_t(cap(dst)),
 		C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
@@ -207,72 +190,56 @@ func Decompress(dst, src []byte) ([]byte, error) {
 //
 // The given dictionary dd is used for the decompression.
 func DecompressDict(dst, src []byte, dd *DDict) ([]byte, error) {
-	decompressInitOnce.Do(decompressInit)
-	dw := getDecompressWork()
-	dw.dst = dst
-	dw.src = src
-	dw.dd = dd
-	decompressWorkCh <- dw
-	<-dw.done
-	dst = dw.dst
-	err := dw.err
-	putDecompressWork(dw)
+	concurrencyLimitCh <- struct{}{}
+	var dctx, dctxDict *dctxWrapper
+	if dd == nil {
+		dctx = dctxPool.Get().(*dctxWrapper)
+	} else {
+		dctxDict = dctxDictPool.Get().(*dctxWrapper)
+	}
+	var err error
+	dst, err = decompress(dctx, dctxDict, dst, src, dd)
+	if dd == nil {
+		dctxPool.Put(dctx)
+	} else {
+		dctxDictPool.Put(dctxDict)
+	}
+	<-concurrencyLimitCh
 	return dst, err
 }
-func getDecompressWork() *decompressWork {
-	v := decompressWorkPool.Get()
-	if v == nil {
-		v = &decompressWork{
-			done: make(chan struct{}),
-		}
-	}
-	return v.(*decompressWork)
+var dctxPool = &sync.Pool{
+	New: newDCtx,
 }
-func putDecompressWork(dw *decompressWork) {
-	dw.dst = nil
-	dw.src = nil
-	dw.dd = nil
-	dw.err = nil
-	decompressWorkPool.Put(dw)
+var dctxDictPool = &sync.Pool{
+	New: newDCtx,
 }
-type decompressWork struct {
-	dst []byte
-	src []byte
-	dd *DDict
-	err error
-	done chan struct{}
-}
-var (
-	decompressWorkCh chan *decompressWork
-	decompressWorkPool sync.Pool
-	decompressInitOnce sync.Once
-)
-func decompressInit() {
-	gomaxprocs := runtime.GOMAXPROCS(-1)
-	decompressWorkCh = make(chan *decompressWork, gomaxprocs)
-	for i := 0; i < gomaxprocs; i++ {
-		go decompressWorker()
-	}
-}
-func decompressWorker() {
+func newDCtx() interface{} {
 	dctx := C.ZSTD_createDCtx()
-	dctxDict := C.ZSTD_createDCtx()
-	for dw := range decompressWorkCh {
-		dw.dst, dw.err = decompress(dctx, dctxDict, dw.dst, dw.src, dw.dd)
-		dw.done <- struct{}{}
-	}
+	dw := &dctxWrapper{
+		dctx: dctx,
+	}
+	runtime.SetFinalizer(dw, freeDCtx)
+	return dw
 }
-func decompress(dctx, dctxDict *C.ZSTD_DCtx, dst, src []byte, dd *DDict) ([]byte, error) {
+func freeDCtx(dw *dctxWrapper) {
+	C.ZSTD_freeDCtx(dw.dctx)
+	dw.dctx = nil
+}
+type dctxWrapper struct {
+	dctx *C.ZSTD_DCtx
+}
+func decompress(dctx, dctxDict *dctxWrapper, dst, src []byte, dd *DDict) ([]byte, error) {
 	if len(src) == 0 {
 		return dst, nil
 	}
@@ -325,17 +292,17 @@ func decompress(dctx, dctxDict *C.ZSTD_DCtx, dst, src []byte, dd *DDict) ([]byte
 	return dst[:dstLen], fmt.Errorf("decompression error: %s", errStr(result))
 }
-func decompressInternal(dctx, dctxDict *C.ZSTD_DCtx, dst, src []byte, dd *DDict) C.size_t {
+func decompressInternal(dctx, dctxDict *dctxWrapper, dst, src []byte, dd *DDict) C.size_t {
 	var n C.size_t
 	if dd != nil {
-		n = C.ZSTD_decompress_usingDDict_wrapper(dctxDict,
+		n = C.ZSTD_decompress_usingDDict_wrapper(dctxDict.dctx,
 			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
 			C.size_t(cap(dst)),
 			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
 			C.size_t(len(src)),
 			dd.p)
 	} else {
-		n = C.ZSTD_decompressDCtx_wrapper(dctx,
+		n = C.ZSTD_decompressDCtx_wrapper(dctx.dctx,
 			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
 			C.size_t(cap(dst)),
 			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
@@ -347,6 +314,11 @@ func decompressInternal(dctx, dctxDict *C.ZSTD_DCtx, dst, src []byte, dd *DDict)
 	return n
 }
+var concurrencyLimitCh = func() chan struct{} {
+	gomaxprocs := runtime.GOMAXPROCS(-1)
+	return make(chan struct{}, gomaxprocs)
+}()
 func errStr(result C.size_t) string {
 	errCode := C.ZSTD_getErrorCode(result)
 	errCStr := C.ZSTD_getErrorString(errCode)
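
The rewrite above is the heart of v1.6.0: the per-GOMAXPROCS worker goroutines and work channels are gone, replaced by sync.Pool-managed wrappers around the C compression/decompression contexts, with runtime.SetFinalizer releasing the C memory and a GOMAXPROCS-sized buffered channel still bounding concurrent cgo calls. A standalone sketch of the same pattern with a placeholder resource instead of a real ZSTD context (the names here are illustrative, not gozstd's):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// ctxWrapper stands in for gozstd's cctxWrapper/dctxWrapper, which wrap
// C-allocated ZSTD contexts. Here the "context" is just a byte slice.
type ctxWrapper struct {
	ctx []byte
}

var ctxPool = &sync.Pool{
	New: newCtx,
}

func newCtx() interface{} {
	cw := &ctxWrapper{ctx: make([]byte, 1)}
	// Release the underlying resource once the wrapper becomes unreachable;
	// gozstd uses this hook to call C.ZSTD_freeCCtx / C.ZSTD_freeDCtx.
	runtime.SetFinalizer(cw, freeCtx)
	return cw
}

func freeCtx(cw *ctxWrapper) {
	cw.ctx = nil // placeholder for freeing the C context
}

// concurrencyLimitCh caps concurrent "cgo" calls at GOMAXPROCS,
// mirroring the channel introduced in this diff.
var concurrencyLimitCh = make(chan struct{}, runtime.GOMAXPROCS(-1))

func doWork(payload []byte) int {
	concurrencyLimitCh <- struct{}{}
	defer func() { <-concurrencyLimitCh }()

	cw := ctxPool.Get().(*ctxWrapper)
	defer ctxPool.Put(cw)

	// The real code calls into libzstd here, reusing cw's context.
	return len(payload) + len(cw.ctx)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = doWork([]byte("hello"))
		}()
	}
	wg.Wait()
	fmt.Println("done")
}
```

Compared with the old worker goroutines, the pool creates contexts lazily, lets idle ones be reclaimed by the garbage collector, and avoids a channel hand-off on every call while keeping the same cap on parallel cgo work.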

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@@ -164,14 +164,18 @@ func (zw *Writer) ReadFrom(r io.Reader) (int64, error) {
 		// Fill the inBuf.
 		for zw.inBuf.size < cstreamInBufSize {
 			n, err := r.Read(zw.inBufGo[zw.inBuf.size:cstreamInBufSize])
+
+			// Sometimes n > 0 even when Read() returns an error.
+			// This is true especially if the error is io.EOF.
+			zw.inBuf.size += C.size_t(n)
+			nn += int64(n)
+
 			if err != nil {
 				if err == io.EOF {
 					return nn, nil
 				}
 				return nn, err
 			}
-			zw.inBuf.size += C.size_t(n)
-			nn += int64(n)
 		}
 		// Flush the inBuf.
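
The ReadFrom fix above follows the io.Reader contract: Read may return n > 0 together with a non-nil error (io.EOF in particular), and those n bytes must be accounted for before the error is handled. A small self-contained illustration of the same idiom, independent of gozstd:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readAll demonstrates the idiom applied in ReadFrom above: consume the bytes
// returned by Read before inspecting err, since a reader may return both
// n > 0 and a non-nil error (commonly io.EOF) from the same call.
func readAll(r io.Reader) ([]byte, error) {
	buf := make([]byte, 0, 64)
	chunk := make([]byte, 16)
	for {
		n, err := r.Read(chunk)
		// Account for the data first...
		buf = append(buf, chunk[:n]...)
		// ...and only then look at the error.
		if err != nil {
			if err == io.EOF {
				return buf, nil
			}
			return buf, err
		}
	}
}

func main() {
	data, err := readAll(strings.NewReader("zstd streams via cgo"))
	fmt.Printf("%q %v\n", data, err)
}
```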

View file

@@ -94,6 +94,8 @@ typedef struct {
     unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
     unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
     double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
+    unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
+    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */
     ZDICT_params_t zParams;
 } ZDICT_cover_params_t;
@@ -105,6 +107,9 @@ typedef struct {
     unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
     double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
     unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */
+    unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
+    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */
     ZDICT_params_t zParams;
 } ZDICT_fastCover_params_t;

View file

@@ -71,7 +71,7 @@ extern "C" {
 /*------ Version ------*/
 #define ZSTD_VERSION_MAJOR 1
 #define ZSTD_VERSION_MINOR 4
-#define ZSTD_VERSION_RELEASE 0
+#define ZSTD_VERSION_RELEASE 2
 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
@@ -82,16 +82,16 @@ ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library v
 #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
 ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */
-/***************************************
+/* *************************************
 *  Default constant
 ***************************************/
 #ifndef ZSTD_CLEVEL_DEFAULT
 # define ZSTD_CLEVEL_DEFAULT 3
 #endif
-/***************************************
+/* *************************************
 *  Constants
 ***************************************/
 /* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
 #define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
@@ -183,9 +183,14 @@ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compres
 ***************************************/
 /*= Compression context
  * When compressing many times,
- * it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ * it is recommended to allocate a context just once,
+ * and re-use it for each successive compression operation.
  * This will make workload friendlier for system's memory.
- * Use one context per thread for parallel execution in multi-threaded environments. */
+ * Note : re-using context is just a speed / resource optimization.
+ *        It doesn't change the compression ratio, which remains identical.
+ * Note 2 : In multi-threaded environments,
+ *        use one different context per thread for parallel execution.
+ */
 typedef struct ZSTD_CCtx_s ZSTD_CCtx;
 ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
 ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
@@ -380,6 +385,7 @@ typedef enum {
  * ZSTD_c_forceMaxWindow
  * ZSTD_c_forceAttachDict
  * ZSTD_c_literalCompressionMode
+ * ZSTD_c_targetCBlockSize
  * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
  * note : never ever use experimentalParam? names directly;
  * also, the enums values themselves are unstable and can still change.
@@ -389,6 +395,7 @@ typedef enum {
     ZSTD_c_experimentalParam3=1000,
     ZSTD_c_experimentalParam4=1001,
     ZSTD_c_experimentalParam5=1002,
+    ZSTD_c_experimentalParam6=1003,
 } ZSTD_cParameter;
 typedef struct {
@@ -657,17 +664,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                                          ZSTD_inBuffer* input,
                                          ZSTD_EndDirective endOp);
-ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
-/*******************************************************************************
- * This is a legacy streaming API, and can be replaced by ZSTD_CCtx_reset() and
- * ZSTD_compressStream2(). It is redundant, but is still fully supported.
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossing such interface to an absolute minimum.
+ * It's not rare that performance ends being spent more into the interface, rather than compression itself.
+ * In which cases, prefer using large buffers, as large as practical,
+ * for both input and output, to reduce the nb of roundtrips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
+
+/* *****************************************************************************
+ * This following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
  * Advanced parameters and dictionary compression can only be used through the
  * new API.
  ******************************************************************************/
-/**
+/*!
  * Equivalent to:
  *
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@@ -675,16 +698,16 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
  *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
  */
 ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-/**
+/*!
  * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
  * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
  * the next read size (if non-zero and not an error). ZSTD_compressStream2()
- * returns the number of bytes left to flush (if non-zero and not an error).
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
  */
 ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
 ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
 ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
@@ -969,7 +992,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #endif  /* ZSTD_H_235446 */
-/****************************************************************************************
+/* **************************************************************************************
  * ADVANCED AND EXPERIMENTAL FUNCTIONS
  ****************************************************************************************
  * The definitions in the following section are considered experimental.
@@ -1037,6 +1060,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 #define ZSTD_LDM_HASHRATELOG_MIN 0
 #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+
 /* internal */
 #define ZSTD_HASHLOG3_MAX 17
@@ -1162,7 +1189,7 @@ typedef enum {
  * however it does mean that all frame data must be present and valid. */
 ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
-/** ZSTD_decompressBound() :
+/*! ZSTD_decompressBound() :
  * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
  * `srcSize` must be the _exact_ size of this series
  *       (i.e. there should be a frame boundary at `src + srcSize`)
@@ -1409,6 +1436,11 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
  */
 #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
 /*! ZSTD_CCtx_getParameter() :
  *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
  *  and store it into int* value.
@@ -1843,7 +1875,7 @@ typedef struct {
     unsigned checksumFlag;
 } ZSTD_frameHeader;
-/** ZSTD_getFrameHeader() :
+/*! ZSTD_getFrameHeader() :
  *  decode Frame Header, or requires larger `srcSize`.
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
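
The reworded zstd.h comment above about recommended buffer sizes calls out FFI callers explicitly: every streaming call crosses the Go/cgo/libzstd boundary, so it pays to hand data over in large chunks. A hedged sketch of one way to apply that advice from the Go side, batching many small writes through bufio before they reach gozstd's Writer (the 1 MB buffer size and the output path are arbitrary choices for the example):

```go
package main

import (
	"bufio"
	"log"
	"os"

	"github.com/valyala/gozstd"
)

func main() {
	f, err := os.Create("/tmp/out.zst")
	if err != nil {
		log.Fatalf("cannot create output file: %s", err)
	}
	defer f.Close()

	zw := gozstd.NewWriter(f)
	defer zw.Release()

	// Batch many small writes into ~1MB chunks so that far fewer calls
	// cross the Go -> cgo -> libzstd boundary, in the spirit of the
	// updated zstd.h guidance for FFI callers.
	bw := bufio.NewWriterSize(zw, 1<<20)
	for i := 0; i < 100000; i++ {
		if _, err := bw.WriteString("small log line\n"); err != nil {
			log.Fatalf("write error: %s", err)
		}
	}
	if err := bw.Flush(); err != nil {
		log.Fatalf("flush error: %s", err)
	}
	if err := zw.Close(); err != nil {
		log.Fatalf("close error: %s", err)
	}
}
```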

vendor/modules.txt

@@ -18,7 +18,7 @@ github.com/valyala/bytebufferpool
 github.com/valyala/fastjson/fastfloat
 # github.com/valyala/fastrand v1.0.0
 github.com/valyala/fastrand
-# github.com/valyala/gozstd v1.5.1
+# github.com/valyala/gozstd v1.6.0
 github.com/valyala/gozstd
 # github.com/valyala/histogram v1.0.1
 github.com/valyala/histogram