[VOL-5486] Upgrade library versions
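
This bumps the vendored github.com/klauspost/compress zstd encoder.
The enc_dfast.go changes below come from upstream and fall into five
groups:

- the fixed-width hash5/hash8 helpers are replaced by a single
  length-parameterised hashLen helper,
- the recurring "lenght" typo is renamed to "length",
- the generic debug flag is replaced by the more specific debugEncoder,
- the package-level bufferReset constant becomes the e.bufferReset
  field, and the wraparound guards now account for len(e.hist),
- dictionary table refreshes use slice-to-array-pointer conversions
  (Go 1.17+) instead of copy().

A minimal sketch of the hashLen idiom follows; the prime constants and
the 16-bit short-table width are illustrative stand-ins, not the
vendored values:

    package main

    import "fmt"

    // Illustrative multiplicative-hash constants; the vendored code
    // keeps its own fixed primes per input width.
    const (
        prime5bytes = 889523592379
        prime8bytes = 0xcf1bbcdcb7a56463
    )

    // hashLen hashes the low mls bytes of u into `length` output bits,
    // so hashLen(cv, bits, 5) and hashLen(cv, bits, 8) cover the old
    // hash5(cv, bits) and hash8(cv, bits) call sites in this diff.
    func hashLen(u uint64, length, mls uint8) uint32 {
        switch mls {
        case 8:
            return uint32((u * prime8bytes) >> (64 - length))
        default: // 5-byte variant, used for the short match table
            return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
        }
    }

    func main() {
        cv := uint64(0x0123456789abcdef)
        fmt.Println(hashLen(cv, 17, 8)) // long-table index (dFastLongTableBits)
        fmt.Println(hashLen(cv, 16, 5)) // short-table index (width illustrative)
    }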
Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
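
Note on the dictionary table refresh at the bottom of this diff: the
updated code assigns through a slice-to-array-pointer conversion rather
than copy(). A minimal sketch of that idiom, with a toy tableSize
standing in for dFastLongTableSize:

    package main

    import "fmt"

    type tableEntry struct {
        val    uint32
        offset int32
    }

    const tableSize = 4 // toy size; the real table holds dFastLongTableSize entries

    // refresh copies the whole dictionary table in one statement by
    // converting the slice to an array pointer (Go 1.17+) and assigning
    // the dereferenced value.
    func refresh(dst *[tableSize]tableEntry, src []tableEntry) {
        *dst = *(*[tableSize]tableEntry)(src)
    }

    func main() {
        var table [tableSize]tableEntry
        dict := []tableEntry{{1, 0}, {2, 1}, {3, 2}, {4, 3}}
        refresh(&table, dict)
        fmt.Println(table)
    }

Unlike copy(), the conversion panics if the source slice is shorter
than the array type, so the full-length requirement is checked up front
rather than silently truncated.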
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 8629d43..d36be7b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -10,6 +10,7 @@
dFastLongTableBits = 17 // Bits used in the long match table
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
@@ -17,6 +18,8 @@
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastShortLen = 5 // Bytes used for table hash
+
)
type doubleFastEncoder struct {
@@ -41,14 +44,10 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = tableEntry{}
- }
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@@ -109,7 +108,7 @@
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
- if debug {
+ if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@@ -124,8 +123,8 @@
panic("offset0 was 0")
}
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -139,9 +138,9 @@
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -167,11 +166,11 @@
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
- if debug {
- println("repeat ended", s, lenght)
+ if debugEncoder {
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -208,7 +207,7 @@
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -304,16 +303,16 @@
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- e.longTable[hash8(cv0, dFastLongTableBits)] = te0
- e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- e.table[hash5(cv0, dFastShortTableBits)] = te0
- e.table[hash5(cv1, dFastShortTableBits)] = te1
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@@ -330,8 +329,8 @@
}
// Store this, since we have it.
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -368,7 +367,7 @@
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
- if debug {
+ if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@@ -385,7 +384,7 @@
)
// Protect against e.cur wraparound.
- if e.cur >= bufferReset {
+ if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@@ -427,7 +426,7 @@
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
- if debug {
+ if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@@ -436,8 +435,8 @@
var t int32
for {
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -483,7 +482,7 @@
s += length + repOff
nextEmit = s
if s >= sLimit {
- if debug {
+ if debugEncoder {
println("repeat ended", s, length)
}
@@ -521,7 +520,7 @@
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -614,16 +613,16 @@
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- e.longTable[hash8(cv0, dFastLongTableBits)] = te0
- e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- e.table[hash5(cv0, dFastShortTableBits)] = te0
- e.table[hash5(cv1, dFastShortTableBits)] = te1
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@@ -640,8 +639,8 @@
}
// Store this, since we have it.
- nextHashS := hash5(cv1>>8, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -677,12 +676,12 @@
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
- if debug {
+ if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@@ -697,7 +696,7 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -767,7 +766,7 @@
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
- if debug {
+ if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@@ -782,8 +781,8 @@
panic("offset0 was 0")
}
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -799,9 +798,9 @@
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -827,11 +826,11 @@
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
- if debug {
- println("repeat ended", s, lenght)
+ if debugEncoder {
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -868,7 +867,7 @@
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
- nextHashL = hash8(cv, dFastLongTableBits)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@@ -965,8 +964,8 @@
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
- longHash1 := hash8(cv0, dFastLongTableBits)
- longHash2 := hash8(cv0, dFastLongTableBits)
+ longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -977,8 +976,8 @@
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
- hashVal1 := hash5(cv0, dFastShortTableBits)
- hashVal2 := hash5(cv1, dFastShortTableBits)
+ hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+ hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
e.table[hashVal1] = te0
e.markShardDirty(hashVal1)
e.table[hashVal2] = te1
@@ -999,8 +998,8 @@
}
// Store this, since we have it.
- nextHashS := hash5(cv, dFastShortTableBits)
- nextHashL := hash8(cv, dFastLongTableBits)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1039,7 +1038,7 @@
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
- if debug {
+ if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// If we encoded more than 64K mark all dirty.
@@ -1071,21 +1070,21 @@
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
- e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: e.maxMatchOff,
}
end := int32(len(d.content)) - 8 + e.maxMatchOff
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
- e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: i,
}
}
}
e.lastDictID = d.id
- e.allDirty = true
+ allDirty = true
}
// Reset table to initial state
e.cur = e.maxMatchOff
@@ -1100,7 +1099,8 @@
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1111,7 +1111,9 @@
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
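
Reviewer note on the wraparound guards: match offsets are stored
relative to e.cur, so the upgraded checks compare e.cur against
e.bufferReset (now an encoder field) minus the pending history length,
where the old code compared e.cur alone against a package constant. A
minimal sketch of the check, with illustrative constants:

    package main

    import "fmt"

    const maxMatchOff = 1 << 16 // illustrative stand-in for e.maxMatchOff

    // needsReset mirrors the upgraded guard: cur plus the pending
    // history must stay below the reset threshold, otherwise the match
    // tables are cleared and offsets restart at maxMatchOff.
    func needsReset(cur, bufferReset, histLen int32) bool {
        return cur >= bufferReset-histLen
    }

    func main() {
        const bufferReset = 1<<31 - 1 - maxMatchOff // illustrative threshold
        fmt.Println(needsReset(bufferReset-100, bufferReset, 4096)) // true: reset first
        fmt.Println(needsReset(1024, bufferReset, 4096))            // false: safe
    }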