[VOL-5486] Upgrade library versions
Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 16d4ab6..20671dc 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -3,6 +3,8 @@
import (
"errors"
"fmt"
+ "math"
+ "math/bits"
"runtime"
"strings"
)
@@ -24,6 +26,7 @@
allLitEntropy bool
customWindow bool
customALEntropy bool
+ customBlockSize bool
lowMem bool
dict *dict
}
@@ -33,10 +36,10 @@
concurrent: runtime.GOMAXPROCS(0),
crc: true,
single: nil,
- blockSize: 1 << 16,
+ blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
- allLitEntropy: true,
+ allLitEntropy: false,
lowMem: false,
}
}
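For context on the defaults changed above (block size now maxCompressedBlockSize, allLitEntropy off at this level): a caller passing no options gets SpeedDefault, an 8MB window, CRC enabled and GOMAXPROCS concurrency. A minimal, hedged usage sketch, not part of the patch; the helper name is illustrative.

// Hedged sketch; assumes: import ("bytes"; "github.com/klauspost/compress/zstd")
func compressWithDefaults(src []byte) ([]byte, error) {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf) // encoderOptions.setDefault supplies all defaults
	if err != nil {
		return nil, err
	}
	if _, err := enc.Write(src); err != nil {
		enc.Close()
		return nil, err
	}
	// Close flushes remaining data and finishes the frame.
	if err := enc.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}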
@@ -46,22 +49,22 @@
switch o.level {
case SpeedFastest:
if o.dict != nil {
- return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault:
if o.dict != nil {
- return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
}
- return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression:
if o.dict != nil {
- return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression:
- return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
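The switch above is driven by the configured encoder level (WithEncoderLevel); a hedged sketch of one-shot compression at an explicit level, not part of the patch.

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func compressAtLevel(src []byte, level zstd.EncoderLevel) ([]byte, error) {
	// A nil writer is fine when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	return enc.EncodeAll(src, nil), nil
}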
@@ -75,6 +78,7 @@
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error {
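Per the added comment, a concurrency of 1 disables async compression for streams; a hedged sketch, not part of the patch.

// Hedged sketch; assumes: import ("io"; "github.com/klauspost/compress/zstd")
func compressStreamSynchronously(dst io.Writer, src io.Reader) error {
	// Concurrency 1 keeps stream encoding synchronous (no background goroutines).
	enc, err := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}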
@@ -90,7 +94,7 @@
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level, and is capped at 8MB.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
@@ -106,6 +110,7 @@
o.customWindow = true
if o.blockSize > o.windowSize {
o.blockSize = o.windowSize
+ o.customBlockSize = true
}
return nil
}
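WithWindowSize expects a power of two between MinWindowSize and MaxWindowSize and, per the new customBlockSize flag, also caps the block size; a hedged sketch, not part of the patch.

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func newSmallWindowEncoder() (*zstd.Encoder, error) {
	// 1MB window: must be a power of two; also lowers the block size to match.
	return zstd.NewWriter(nil, zstd.WithWindowSize(1<<20))
}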
@@ -124,7 +129,7 @@
}
// No need to waste our time.
if n == 1 {
- o.pad = 0
+ n = 0
}
if n > 1<<30 {
return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
@@ -188,10 +193,9 @@
return SpeedDefault
case level >= 6 && level < 10:
return SpeedBetterCompression
- case level >= 10:
- return SpeedBetterCompression
+ default:
+ return SpeedBestCompression
}
- return SpeedDefault
}
// String provides a string representation of the compression level.
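This switch maps zstd-style integer levels onto the package's coarse levels; after the change, 10 and above yield SpeedBestCompression and the unreachable trailing return is gone. A hedged sketch using the exported mapping function (assumed to be EncoderLevelFromZstd), not part of the patch.

// Hedged sketch; assumes: import ("fmt"; "github.com/klauspost/compress/zstd")
func printLevelMapping() {
	for _, l := range []int{1, 3, 7, 19} {
		// Expected after this change: 1 fastest, 3 default, 7 better, 19 best.
		fmt.Println(l, "=>", zstd.EncoderLevelFromZstd(l))
	}
}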
@@ -222,16 +226,19 @@
switch o.level {
case SpeedFastest:
o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
- o.windowSize = 16 << 20
+ o.windowSize = 8 << 20
case SpeedBestCompression:
- o.windowSize = 32 << 20
+ o.windowSize = 8 << 20
}
}
if !o.customALEntropy {
- o.allLitEntropy = l > SpeedFastest
+ o.allLitEntropy = l > SpeedDefault
}
return nil
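With this change, SpeedBetterCompression and SpeedBestCompression default to an 8MB window (previously 16MB and 32MB), and allLitEntropy now defaults on only above SpeedDefault. Callers who relied on the larger windows can still request them explicitly; a hedged sketch, not part of the patch.

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func newBestCompressionLargeWindow() (*zstd.Encoder, error) {
	// An explicit window size overrides the (now 8MB) per-level default.
	return zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedBestCompression),
		zstd.WithWindowSize(32<<20))
}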
@@ -278,7 +285,7 @@
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
-// If this is not specified, block encodes will automatically choose this based on the input size.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
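WithSingleSegment only affects block (EncodeAll) encodes, where the frame carries the full decompressed size and the decoder must keep the whole output in one contiguous segment; a hedged sketch, not part of the patch.

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func encodeSingleSegment(src []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithSingleSegment(true))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	// Single-segment applies to EncodeAll; streamed encodes are unaffected.
	return enc.EncodeAll(src, nil), nil
}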
@@ -299,7 +306,13 @@
}
// WithEncoderDict allows registering a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error {
d, err := loadDict(dict)
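WithEncoderDict expects a dictionary in the zstd --train format; a hedged sketch, not part of the patch (decoding would register the same dictionary on the reader, e.g. via WithDecoderDicts).

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func compressWithTrainedDict(src, dict []byte) ([]byte, error) {
	// dict must be in the "zstd --train" dictionary format.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	return enc.EncodeAll(src, nil), nil
}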
@@ -310,3 +323,17 @@
return nil
}
}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
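The new WithEncoderDictRaw seeds the encoder with arbitrary content as initial history; decoding presumably registers the same content through the matching WithDecoderDictRaw option added alongside it. A hedged round-trip sketch, not part of the patch.

// Hedged sketch; assumes: import "github.com/klauspost/compress/zstd"
func rawDictRoundTrip(src, history []byte) ([]byte, error) {
	const dictID = 1
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(dictID, history))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	compressed := enc.EncodeAll(src, nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(dictID, history))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}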