[VOL-5486] Upgrade library versions

Change-Id: I8b4e88699e03f44ee13e467867f45ae3f0a63c4b
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 0000000..5d7e88d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,36 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
+
+fuzz/*.zip
diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
new file mode 100644
index 0000000..bd899d8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 0000000..dee7754
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4: LZ4 compression in pure Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low-level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [implementation](https://github.com/lz4/lz4).
+
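+As a quick illustration of the block-level API (a minimal sketch; `CompressBlock` reports an incompressible input by returning 0):
+
+```
+data := []byte("hello world, hello world, hello world")
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, err := lz4.CompressBlock(data, buf)
+if err != nil {
+	// handle the error
+}
+if n == 0 {
+	// incompressible input: store it as-is
+}
+compressed := buf[:n]
+```
+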
+## Install
+
+Assuming you have the Go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is also a command-line tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c@latest
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
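+For example (file names are illustrative):
+
+```
+lz4c compress -size 1M data.bin
+lz4c uncompress data.bin.lz4
+```
+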
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome, whether bug fixes, performance improvements, or new features!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
new file mode 100644
index 0000000..8df0dc7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
@@ -0,0 +1,222 @@
+package lz4
+
+import (
+	"errors"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
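+// crState tracks the CompressingReader lifecycle: initial (frame header not
+// yet written), reading (compressing source data), flushing (draining
+// buffered output after source EOF) and done (terminated or failed).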
+type crState int
+
+const (
+	crStateInitial crState = iota
+	crStateReading
+	crStateFlushing
+	crStateDone
+)
+
+type CompressingReader struct {
+	state   crState
+	src     io.ReadCloser             // source reader
+	level   lz4block.CompressionLevel // how hard to try
+	frame   *lz4stream.Frame          // frame being built
+	in      []byte
+	out     ovWriter
+	handler func(int)
+}
+
+// NewCompressingReader creates a reader which reads compressed data from
+// a raw stream. This makes it the logical opposite of a normal lz4.Reader.
+// We require an io.ReadCloser as the underlying source for compatibility
+// with Go's http.Request.
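+//
+// A sketch of typical use (names are illustrative; error handling omitted):
+//
+//	f, _ := os.Open("payload.bin")
+//	zrd := lz4.NewCompressingReader(f)
+//	req, _ := http.NewRequest("PUT", uploadURL, zrd)
+//	resp, _ := http.DefaultClient.Do(req)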
+func NewCompressingReader(src io.ReadCloser) *CompressingReader {
+	zrd := &CompressingReader{
+		frame: lz4stream.NewFrame(),
+	}
+
+	_ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone)
+	zrd.Reset(src)
+
+	return zrd
+}
+
+// Source exposes the underlying source stream for introspection and control.
+func (zrd *CompressingReader) Source() io.ReadCloser {
+	return zrd.src
+}
+
+// Close simply invokes the underlying stream Close method. This method is
+// provided for the benefit of Go http client/server, which relies on Close
+// for goroutine termination.
+func (zrd *CompressingReader) Close() error {
+	return zrd.src.Close()
+}
+
+// Apply applies the given options to the lz4 encoder.
+func (zrd *CompressingReader) Apply(options ...Option) (err error) {
+	if zrd.state != crStateInitial {
+		return lz4errors.ErrOptionClosedOrError
+	}
+
+	zrd.Reset(zrd.src)
+
+	for _, o := range options {
+		if err = o(zrd); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (*CompressingReader) private() {}
+
+func (zrd *CompressingReader) init() error {
+	zrd.frame.InitW(&zrd.out, 1, false)
+	size := zrd.frame.Descriptor.Flags.BlockSizeIndex()
+	zrd.in = size.Get()
+	return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out)
+}
+
+// Read reads LZ4-compressed data into p, compressing data from the
+// underlying source as needed.
+func (zrd *CompressingReader) Read(p []byte) (n int, err error) {
+	defer func() {
+		if err != nil {
+			zrd.state = crStateDone
+		}
+	}()
+
+	if !zrd.out.reset(p) {
+		return len(p), nil
+	}
+
+	switch zrd.state {
+	case crStateInitial:
+		err = zrd.init()
+		if err != nil {
+			return
+		}
+		zrd.state = crStateReading
+	case crStateDone:
+		return 0, errors.New("This reader is done")
+	case crStateFlushing:
+		if zrd.out.dataPos > 0 {
+			n = zrd.out.dataPos
+			zrd.out.data = nil
+			zrd.out.dataPos = 0
+			return
+		} else {
+			zrd.state = crStateDone
+			return 0, io.EOF
+		}
+	}
+
+	for zrd.state == crStateReading {
+		block := zrd.frame.Blocks.Block
+
+		var rCount int
+		rCount, err = io.ReadFull(zrd.src, zrd.in)
+		switch err {
+		case nil:
+			err = block.Compress(
+				zrd.frame, zrd.in[:rCount], zrd.level,
+			).Write(zrd.frame, &zrd.out)
+			zrd.handler(len(block.Data))
+			if err != nil {
+				return
+			}
+
+			if zrd.out.dataPos == len(zrd.out.data) {
+				n = zrd.out.dataPos
+				zrd.out.dataPos = 0
+				zrd.out.data = nil
+				return
+			}
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			if rCount > 0 {
+				err = block.Compress(
+					zrd.frame, zrd.in[:rCount], zrd.level,
+				).Write(zrd.frame, &zrd.out)
+				zrd.handler(len(block.Data))
+				if err != nil {
+					return
+				}
+			}
+
+			err = zrd.frame.CloseW(&zrd.out, 1)
+			if err != nil {
+				return
+			}
+			zrd.state = crStateFlushing
+
+			n = zrd.out.dataPos
+			zrd.out.dataPos = 0
+			zrd.out.data = nil
+			return
+		default:
+			return
+		}
+	}
+
+	err = lz4errors.ErrInternalUnhandledState
+	return
+}
+
+// Reset makes the stream usable again; mostly handy to reuse lz4 encoder
+// instances.
+func (zrd *CompressingReader) Reset(src io.ReadCloser) {
+	zrd.frame.Reset(1)
+	zrd.state = crStateInitial
+	zrd.src = src
+	zrd.out.clear()
+}
+
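+// ovWriter is an io.Writer that fills a caller-supplied buffer (data) and
+// spills whatever does not fit into an internal overflow buffer (ov), which
+// is drained into the next caller buffer on reset.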
+type ovWriter struct {
+	data    []byte
+	ov      []byte
+	dataPos int
+	ovPos   int
+}
+
+func (wr *ovWriter) Write(p []byte) (n int, err error) {
+	count := copy(wr.data[wr.dataPos:], p)
+	wr.dataPos += count
+
+	if count < len(p) {
+		wr.ov = append(wr.ov, p[count:]...)
+	}
+
+	return len(p), nil
+}
+
+func (wr *ovWriter) reset(out []byte) bool {
+	ovRem := len(wr.ov) - wr.ovPos
+
+	if ovRem >= len(out) {
+		wr.ovPos += copy(out, wr.ov[wr.ovPos:])
+		return false
+	}
+
+	if ovRem > 0 {
+		copy(out, wr.ov[wr.ovPos:])
+		wr.ov = wr.ov[:0]
+		wr.ovPos = 0
+		wr.dataPos = ovRem
+	} else if wr.ovPos > 0 {
+		wr.ov = wr.ov[:0]
+		wr.ovPos = 0
+		wr.dataPos = 0
+	}
+
+	wr.data = out
+	return true
+}
+
+func (wr *ovWriter) clear() {
+	wr.data = nil
+	wr.dataPos = 0
+	wr.ov = wr.ov[:0]
+	wr.ovPos = 0
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 0000000..fec8adb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,481 @@
+package lz4block
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum length of a match sequence (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		*e = lz4errors.ErrInvalidSourceShortBuffer
+	}
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
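+// CompressBlockBound returns the maximum size of a compressed block for an
+// input of size n (worst case: incompressible data grows by 1 byte per 255,
+// plus 16 bytes of headroom).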
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+func UncompressBlock(src, dst, dict []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src, dict); di >= 0 {
+		return di, nil
+	}
+	return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
+type Compressor struct {
+	// Offsets are at most 64kiB, so we can store only the lower 16 bits of
+	// match positions: effectively, an offset from some 64kiB block boundary.
+	//
+	// When we retrieve such an offset, we interpret it as relative to the last
+	// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+	// depending on which of these is inside the current window. If a table
+	// entry was generated more than 64kiB back in the input, we find out by
+	// inspecting the input stream.
+	table [htSize]uint16
+
+	// Bitmap indicating which positions in the table are in use.
+	// This allows us to quickly reset the table for reuse,
+	// without having to zero everything.
+	inUse [htSize / 32]uint32
+}
+
+// get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+	h &= htSize - 1
+	i := 0
+	if c.inUse[h/32]&(1<<(h%32)) != 0 {
+		i = int(c.table[h])
+	}
+	i += si &^ winMask
+	if i >= si {
+		// Try previous 64kiB block (negative when in first block).
+		i -= winSize
+	}
+	return i
+}
+
+func (c *Compressor) put(h uint32, si int) {
+	h &= htSize - 1
+	c.table[h] = uint16(si)
+	c.inUse[h/32] |= 1 << (h % 32)
+}
+
+func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
+func CompressBlock(src, dst []byte) (int, error) {
+	c := compressorPool.Get().(*Compressor)
+	n, err := c.CompressBlock(src, dst)
+	compressorPool.Put(c)
+	return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	// Zero out reused table to avoid non-deterministic output (issue #65).
+	c.reset()
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	// Fast scan strategy: the hash table only stores the last position seen for each hashed sequence.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check for a match at si, si+1 and si+2 and pick the first one we get.
+		// Checking all 3 only requires us to load the source once.
+		ref := c.get(h, si)
+		ref2 := c.get(h2, si+1)
+		c.put(h, si)
+		c.put(h2, si+1)
+
+		offset := si - ref
+
+		if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref3 := c.get(h, si+2)
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref3
+				c.put(h, si)
+
+				if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by looking by batches of 8 bytes.
+		for si+8 <= sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF && di < len(dst); l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		if di+lLen > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
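+		// The extra 2 bytes reserve room for the offset, encoded below.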
+		anchor = si
+
+		// Encode offset.
+		if di > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		c.put(h, si-2)
+	}
+
+lastLiterals:
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	if di >= len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	if di+len(src)-anchor > len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+type CompressorHC struct {
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	hashTable, chainTable [htSize]int
+	needsReset            bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+	c := compressorHCPool.Get().(*CompressorHC)
+	n, err := c.CompressBlock(src, dst, depth)
+	compressorHCPool.Put(c)
+	return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+	if c.needsReset {
+		// Zero out reused table to avoid non-deterministic output (issue #65).
+		c.hashTable = [htSize]int{}
+		c.chainTable = [htSize]int{}
+	}
+	c.needsReset = true // Only false on first call.
+
+	defer recoverBlock(&err)
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	if depth == 0 {
+		depth = winSize
+	}
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous one that has the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMath) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+		}
+		c.chainTable[si&winMask] = c.hashTable[h]
+		c.hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			c.chainTable[si&winMask] = c.hashTable[h]
+			c.hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+lastLiterals:
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 0000000..138083d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,87 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+	Block64Kb uint32 = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+	Block8Mb = 2 * Block4Mb
+)
+
+var (
+	BlockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+	BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+	BlockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+	BlockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+	BlockPool8M   = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
+)
+
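+// Index maps a block size in bytes to its frame descriptor block size index
+// (4..7 for 64KB..4MB, with 3 reserved for legacy 8MB blocks); it returns 0
+// for unsupported sizes.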
+func Index(b uint32) BlockSizeIndex {
+	switch b {
+	case Block64Kb:
+		return 4
+	case Block256Kb:
+		return 5
+	case Block1Mb:
+		return 6
+	case Block4Mb:
+		return 7
+	case Block8Mb: // only valid in legacy mode
+		return 3
+	}
+	return 0
+}
+
+func IsValid(b uint32) bool {
+	return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+	switch b {
+	case 4, 5, 6, 7:
+		return true
+	}
+	return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+	var buf interface{}
+	switch b {
+	case 4:
+		buf = BlockPool64K.Get()
+	case 5:
+		buf = BlockPool256K.Get()
+	case 6:
+		buf = BlockPool1M.Get()
+	case 7:
+		buf = BlockPool4M.Get()
+	case 3:
+		buf = BlockPool8M.Get()
+	}
+	return buf.([]byte)
+}
+
+func Put(buf []byte) {
+	// Safeguard: do not allow invalid buffers.
+	switch c := cap(buf); uint32(c) {
+	case Block64Kb:
+		BlockPool64K.Put(buf[:c])
+	case Block256Kb:
+		BlockPool256K.Put(buf[:c])
+	case Block1Mb:
+		BlockPool1M.Put(buf[:c])
+	case Block4Mb:
+		BlockPool4M.Put(buf[:c])
+	case Block8Mb:
+		BlockPool8M.Put(buf[:c])
+	}
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 0000000..1d00133
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,448 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX literal and match lengths
+// DX token, match offset
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// R14 &dict
+// R15 len(dict)
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOSPLIT, $48-80
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, R11
+	MOVQ dst_len+8(FP), R8
+	ADDQ DI, R8
+
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R9
+	CMPQ R9, $0
+	JE   err_corrupt
+	ADDQ SI, R9
+
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+
+	// shortcut ends
+	// short output end
+	MOVQ R8, R12
+	SUBQ $32, R12
+	// short input end
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	XORL CX, CX
+
+loop:
+	// token := uint32(src[si])
+	MOVBLZX (SI), DX
+	INCQ SI
+
+	// lit_len = token >> 4
+	// if lit_len > 0
+	// CX = lit_len
+	MOVL DX, CX
+	SHRL $4, CX
+
+	// if lit_len != 0xF
+	CMPL CX, $0xF
+	JEQ  lit_len_loop
+	CMPQ DI, R12
+	JAE  copy_literal
+	CMPQ SI, R13
+	JAE  copy_literal
+
+	// copy shortcut
+
+	// A two-stage shortcut for the most common case:
+	// 1) If the literal length is 0..14, and there is enough space,
+	// enter the shortcut and copy 16 bytes on behalf of the literals
+	// (in the fast mode, only 8 bytes can be safely copied this way).
+	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
+	// manner; but we ensure that there's enough space in the output for
+	// those 18 bytes earlier, upon entering the shortcut (in other words,
+	// there is a combined check for both stages).
+
+	// copy literal
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+	ADDQ CX, DI
+	ADDQ CX, SI
+
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	// The second stage: prepare for match copying, decode full info.
+	// If it doesn't work out, the info won't be wasted.
+	// offset := uint16(data[:2])
+	MOVWLZX (SI), DX
+	TESTL DX, DX
+	JE err_corrupt
+	ADDQ $2, SI
+	JC err_short_buf
+
+	MOVQ DI, AX
+	SUBQ DX, AX
+	JC err_corrupt
+	CMPQ AX, DI
+	JA err_short_buf
+
+	// if we can't do the second stage then jump straight to read the
+	// match length, we already have the offset.
+	CMPL CX, $0xF
+	JEQ match_len_loop_pre
+	CMPL DX, $8
+	JLT match_len_loop_pre
+	CMPQ AX, R11
+	JB match_len_loop_pre
+
+	// memcpy(op + 0, match + 0, 8);
+	MOVQ (AX), BX
+	MOVQ BX, (DI)
+	// memcpy(op + 8, match + 8, 8);
+	MOVQ 8(AX), BX
+	MOVQ BX, 8(DI)
+	// memcpy(op +16, match +16, 2);
+	MOVW 16(AX), BX
+	MOVW BX, 16(DI)
+
+	LEAQ const_minMatch(DI)(CX*1), DI
+
+	// shortcut complete, load next token
+	JMP loopcheck
+
+	// Read the rest of the literal length:
+	// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
+lit_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE lit_len_loop
+
+copy_literal:
+	// bounds check src and dst
+	MOVQ SI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R9
+	JA err_short_buf
+
+	MOVQ DI, BX
+	ADDQ CX, BX
+	JC err_short_buf
+	CMPQ BX, R8
+	JA err_short_buf
+
+	// Copy literals of <=48 bytes through the XMM registers.
+	CMPQ CX, $48
+	JGT memmove_lit
+
+	// if len(dst[di:]) < 48
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $48
+	JLT memmove_lit
+
+	// if len(src[si:]) < 48
+	MOVQ R9, BX
+	SUBQ SI, BX
+	CMPQ BX, $48
+	JLT memmove_lit
+
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU 32(SI), X2
+	MOVOU X0, (DI)
+	MOVOU X1, 16(DI)
+	MOVOU X2, 32(DI)
+
+	ADDQ CX, SI
+	ADDQ CX, DI
+
+	JMP finish_lit_copy
+
+memmove_lit:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment SI, DI now so we don't need to save CX.
+	ADDQ CX, DI
+	ADDQ CX, SI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVL DX, 40(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVL 40(SP), DX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+finish_lit_copy:
+	// CX := mLen
+	// free up DX to use for offset
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	CMPQ SI, R9
+	JAE end
+
+	// offset
+	// si += 2
+	// DX := int(src[si-2]) | int(src[si-1])<<8
+	ADDQ $2, SI
+	JC err_short_buf
+	CMPQ SI, R9
+	JA err_short_buf
+	MOVWQZX -2(SI), DX
+
+	// 0 offset is invalid
+	TESTL DX, DX
+	JEQ   err_corrupt
+
+match_len_loop_pre:
+	// if mlen != 0xF
+	CMPB CX, $0xF
+	JNE copy_match
+
+	// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
+match_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE match_len_loop
+
+copy_match:
+	ADDQ $const_minMatch, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R8
+	JA err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	JC copy_match_from_dict
+	CMPQ BX, R11
+	JBE copy_match_from_dict
+
+	// if offset + match_len < di
+	LEAQ (BX)(CX*1), AX
+	CMPQ DI, AX
+	JA copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len >= 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+	JNZ copy_match_loop
+
+	JMP loopcheck
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	XORL CX, CX
+	JMP  loopcheck
+
+copy_match_from_dict:
+	// CX = match_len
+	// BX = &dst + (di - offset)
+
+	// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+	MOVQ R11, AX
+	SUBQ BX, AX
+
+	// BX = len(dict) - dict_bytes_available
+	MOVQ R15, BX
+	SUBQ AX, BX
+	JS err_short_dict
+
+	ADDQ R14, BX
+
+	// if match_len < dict_bytes_available, the match fits entirely within the external dictionary: just copy
+	CMPQ CX, AX
+	JLT memmove_match
+
+	// The match stretches over the dictionary and our block
+	// 1) copy what comes from the dictionary
+	// AX = dict_bytes_available = copy_size
+	// BX = &dict_end - copy_size
+	// CX = match_len
+
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ AX, 16(SP)
+	// store extra stuff we want to recover
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 16(SP), AX // copy_size
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX // match_len
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	// di+=copy_size
+	ADDQ AX, DI
+
+	// 2) copy the rest from the current block
+	// CX = match_len - copy_size = rest_size
+	SUBQ AX, CX
+	MOVQ R11, BX
+
+	// check if we have a copy overlap
+	// AX = &dst + rest_size
+	MOVQ CX, AX
+	ADDQ BX, AX
+	// if &dst + rest_size > di, copy byte by byte
+	CMPQ AX, DI
+
+	JA copy_match_loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment DI now so we don't need to save CX.
+	ADDQ CX, DI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	XORL CX, CX
+
+loopcheck:
+	// for si < len(src)
+	CMPQ SI, R9
+	JB   loop
+
+end:
+	// Remaining length must be zero.
+	TESTQ CX, CX
+	JNE   err_corrupt
+
+	SUBQ R11, DI
+	MOVQ DI, ret+72(FP)
+	RET
+
+err_corrupt:
+	MOVQ $-1, ret+72(FP)
+	RET
+
+err_short_buf:
+	MOVQ $-2, ret+72(FP)
+	RET
+
+err_short_dict:
+	MOVQ $-3, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 0000000..20b21fc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,231 @@
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst	R0
+#define dstorig	R1
+#define src	R2
+#define dstend	R3
+#define srcend	R4
+#define match	R5	// Match address.
+#define dictend	R6
+#define token	R7
+#define len	R8	// Literal and match lengths.
+#define offset	R7	// Match offset; overlaps with token.
+#define tmp1	R9
+#define tmp2	R11
+#define tmp3	R12
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
+	MOVW dst_base  +0(FP), dst
+	MOVW dst_len   +4(FP), dstend
+	MOVW src_base +12(FP), src
+	MOVW src_len  +16(FP), srcend
+
+	CMP $0, srcend
+	BEQ shortSrc
+
+	ADD dst, dstend
+	ADD src, srcend
+
+	MOVW dst, dstorig
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	MOVW    token >> 4, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CMP $0, len
+	BEQ copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S src, len, tmp2
+	BCS      shortSrc
+	CMP      dstend, tmp1
+	//BHI    shortDst // Uncomment for distinct error codes.
+	CMP.LS   srcend, tmp2
+	BHI      shortSrc
+
+	// Copy literal.
+	CMP $4, len
+	BLO copyLiteralFinish
+
+	// Copy 0-3 bytes until src is aligned.
+	TST        $1, src
+	MOVBU.NE.P 1(src), tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $1, len
+
+	TST        $2, src
+	MOVHU.NE.P 2(src), tmp2
+	MOVB.NE.P  tmp2, 1(dst)
+	MOVW.NE    tmp2 >> 8, tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $2, len
+
+	B copyLiteralLoopCond
+
+copyLiteralLoop:
+	// Aligned load, unaligned write.
+	MOVW.P 4(src), tmp1
+	MOVW   tmp1 >>  8, tmp2
+	MOVB   tmp2, 1(dst)
+	MOVW   tmp1 >> 16, tmp3
+	MOVB   tmp3, 2(dst)
+	MOVW   tmp1 >> 24, tmp2
+	MOVB   tmp2, 3(dst)
+	MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+	// Loop until len-4 < 0.
+	SUB.S  $4, len
+	BPL    copyLiteralLoop
+
+copyLiteralFinish:
+	// Copy remaining 0-3 bytes.
+	// At this point, len may be < 0, but len&3 is still accurate.
+	TST       $1, len
+	MOVB.NE.P 1(src), tmp3
+	MOVB.NE.P tmp3, 1(dst)
+	TST       $2, len
+	MOVB.NE.P 2(src), tmp1
+	MOVB.NE.P tmp1, 2(dst)
+	MOVB.NE   -1(src), tmp2
+	MOVB.NE   tmp2, -1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	// This frees up the token register for reuse as offset.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADD.S $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVBU -2(src), offset
+	MOVBU -1(src), tmp1
+	ORR.S tmp1 << 8, offset
+	BEQ   corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	// Bounds check dst+len+minMatch.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S $const_minMatch, tmp1
+	BCS      shortDst
+	CMP      dstend, tmp1
+	BHI      shortDst
+
+	RSB dst, offset, match
+	CMP dstorig, match
+	BGE copyMatch4
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	MOVW dict_base+24(FP), match
+	MOVW dict_len +28(FP), dictend
+
+	ADD $const_minMatch, len
+
+	RSB   dst, dstorig, tmp1
+	RSB   dictend, offset, tmp2
+	ADD.S tmp2, tmp1
+	BMI   shortDict
+	ADD   match, dictend
+	ADD   tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp1
+	MOVB.P  tmp1, 1(dst)
+	SUB.S   $1, len
+	CMP.NE  match, dictend
+	BNE     copyDict
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	CMP  $0, len
+	BEQ  copyMatchDone
+	MOVW dstorig, match
+	B    copyMatch
+
+	// Copy a regular match.
+	// Since len+minMatch is at least four, we can do a 4× unrolled
+	// byte copy loop. Using MOVW instead of four byte loads is faster,
+	// but to remain portable we'd have to align match first, which is
+	// too expensive. By alternating loads and stores, we also handle
+	// the case offset < 4.
+copyMatch4:
+	SUB.S   $4, len
+	MOVBU.P 4(match), tmp1
+	MOVB.P  tmp1, 4(dst)
+	MOVBU   -3(match), tmp2
+	MOVB    tmp2, -3(dst)
+	MOVBU   -2(match), tmp3
+	MOVB    tmp3, -2(dst)
+	MOVBU   -1(match), tmp1
+	MOVB    tmp1, -1(dst)
+	BPL     copyMatch4
+
+	// Restore len, which is now negative.
+	ADD.S $4, len
+	BEQ   copyMatchDone
+
+copyMatch:
+	// Finish with a byte-at-a-time copy.
+	SUB.S   $1, len
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	BNE     copyMatch
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CMP  $0, len
+	BNE  corrupt
+	SUB  dstorig, dst, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVW $-1, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
new file mode 100644
index 0000000..d2fe11b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -0,0 +1,241 @@
+// +build gc
+// +build !noasm
+
+// This implementation assumes that strict alignment checking is turned off.
+// The Go compiler makes the same assumption.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst		R0
+#define dstorig		R1
+#define src		R2
+#define dstend		R3
+#define dstend16	R4	// dstend - 16
+#define srcend		R5
+#define srcend16	R6	// srcend - 16
+#define match		R7	// Match address.
+#define dict		R8
+#define dictlen		R9
+#define dictend		R10
+#define token		R11
+#define len		R12	// Literal and match lengths.
+#define lenRem		R13
+#define offset		R14	// Match offset.
+#define tmp1		R15
+#define tmp2		R16
+#define tmp3		R17
+#define tmp4		R19
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
+	LDP  dst_base+0(FP), (dst, dstend)
+	ADD  dst, dstend
+	MOVD dst, dstorig
+
+	LDP src_base+24(FP), (src, srcend)
+	CBZ srcend, shortSrc
+	ADD src, srcend
+
+	// dstend16 = max(dstend-16, 0) and similarly for srcend16.
+	SUBS $16, dstend, dstend16
+	CSEL LO, ZR, dstend16, dstend16
+	SUBS $16, srcend, srcend16
+	CSEL LO, ZR, srcend16, srcend16
+
+	LDP dict_base+48(FP), (dict, dictlen)
+	ADD dict, dictlen, dictend
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	LSR     $4, token, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CBZ len, copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADDS dst, len, tmp1
+	BCS  shortSrc
+	ADDS src, len, tmp2
+	BCS  shortSrc
+	CMP  dstend, tmp1
+	BHI  shortDst
+	CMP  srcend, tmp2
+	BHI  shortSrc
+
+	// Copy literal.
+	SUBS $16, len
+	BLO  copyLiteralShort
+
+copyLiteralLoop:
+	LDP.P 16(src), (tmp1, tmp2)
+	STP.P (tmp1, tmp2), 16(dst)
+	SUBS  $16, len
+	BPL   copyLiteralLoop
+
+	// Copy (final part of) literal of length 0-15.
+	// If we have >=16 bytes left in src and dst, just copy 16 bytes.
+copyLiteralShort:
+	CMP  dstend16, dst
+	CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
+	BHS  copyLiteralShortEnd
+
+	AND $15, len
+
+	LDP (src), (tmp1, tmp2)
+	ADD len, src
+	STP (tmp1, tmp2), (dst)
+	ADD len, dst
+
+	B copyLiteralDone
+
+	// Safe but slow copy near the end of src, dst.
+copyLiteralShortEnd:
+	TBZ     $3, len, 3(PC)
+	MOVD.P  8(src), tmp1
+	MOVD.P  tmp1, 8(dst)
+	TBZ     $2, len, 3(PC)
+	MOVW.P  4(src), tmp2
+	MOVW.P  tmp2, 4(dst)
+	TBZ     $1, len, 3(PC)
+	MOVH.P  2(src), tmp3
+	MOVH.P  tmp3, 2(dst)
+	TBZ     $0, len, 3(PC)
+	MOVBU.P 1(src), tmp4
+	MOVB.P  tmp4, 1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADDS  $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVHU -2(src), offset
+	CBZ   offset, corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	ADD $const_minMatch, len
+
+	// Bounds check dst+len.
+	ADDS dst, len, tmp2
+	BCS  shortDst
+	CMP  dstend, tmp2
+	BHI  shortDst
+
+	SUB offset, dst, match
+	CMP dstorig, match
+	BHS copyMatchTry8
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	SUB  dstorig, dst, tmp1
+	SUB  offset, dictlen, tmp2
+	ADDS tmp2, tmp1
+	BMI  shortDict
+	ADD  dict, tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp3
+	MOVB.P  tmp3, 1(dst)
+	SUBS    $1, len
+	CCMP    NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
+	BNE     copyDict
+
+	CBZ len, copyMatchDone
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	// Recompute the offset for the next check.
+	MOVD dstorig, match
+	SUB  dstorig, dst, offset
+
+copyMatchTry8:
+	// Copy doublewords if both len and offset are at least eight.
+	// A 16-at-a-time loop doesn't provide a further speedup.
+	CMP  $8, len
+	CCMP HS, offset, $8, $0
+	BLO  copyMatchTry4
+
+	AND    $7, len, lenRem
+	SUB    $8, len
+copyMatchLoop8:
+	MOVD.P 8(match), tmp1
+	MOVD.P tmp1, 8(dst)
+	SUBS   $8, len
+	BPL    copyMatchLoop8
+
+	MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
+	ADD  lenRem, dst
+	MOVD $0, len
+	MOVD tmp2, -8(dst)
+	B    copyMatchDone
+
+copyMatchTry4:
+	// Copy words if both len and offset are at least four.
+	CMP  $4, len
+	CCMP HS, offset, $4, $0
+	BLO  copyMatchLoop1
+
+	MOVWU.P 4(match), tmp2
+	MOVWU.P tmp2, 4(dst)
+	SUBS    $4, len
+	BEQ     copyMatchDone
+
+copyMatchLoop1:
+	// Byte-at-a-time copy for small offsets <= 3.
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	SUBS    $1, len
+	BNE     copyMatchLoop1
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CBNZ len, corrupt
+	SUB  dstorig, dst, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVD $-1, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 0000000..8d9023d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,10 @@
+//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
+// +build amd64 arm arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src, dict []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 0000000..9f568fb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,139 @@
+//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm,!arm64 appengine !gc noasm
+
+package lz4block
+
+import (
+	"encoding/binary"
+)
+
+func decodeBlock(dst, src, dict []byte) (ret int) {
+	// Restrict capacities so we don't read or write out of bounds.
+	dst = dst[:len(dst):len(dst)]
+	src = src[:len(src):len(src)]
+
+	const hasError = -2
+
+	if len(src) == 0 {
+		return hasError
+	}
+
+	defer func() {
+		if recover() != nil {
+			ret = hasError
+		}
+	}()
+
+	var si, di uint
+	for si < uint(len(src)) {
+		// Literals and match lengths (token).
+		b := uint(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			switch {
+			case lLen < 0xF && si+16 < uint(len(src)):
+				// Shortcut 1
+				// if we have enough room in src and dst, and the literals length
+				// is small enough (0..14) then copy all 16 bytes, even if not all
+				// are part of the literals.
+				copy(dst[di:], src[si:si+16])
+				si += lLen
+				di += lLen
+				if mLen := b & 0xF; mLen < 0xF {
+					// Shortcut 2
+					// if the match length (4..18) fits within the literals, then copy
+					// all 18 bytes, even if not all are part of the literals.
+					mLen += 4
+					if offset := u16(src[si:]); mLen <= offset && offset < di {
+						i := di - offset
+						// The remaining buffer may not hold 18 bytes.
+						// See https://github.com/pierrec/lz4/issues/51.
+						if end := i + 18; end <= uint(len(dst)) {
+							copy(dst[di:], dst[i:end])
+							si += 2
+							di += mLen
+							continue
+						}
+					}
+				}
+			case lLen == 0xF:
+				for {
+					x := uint(src[si])
+					if lLen += x; int(lLen) < 0 {
+						return hasError
+					}
+					si++
+					if x != 0xFF {
+						break
+					}
+				}
+				fallthrough
+			default:
+				copy(dst[di:di+lLen], src[si:si+lLen])
+				si += lLen
+				di += lLen
+			}
+		}
+
+		mLen := b & 0xF
+		if si == uint(len(src)) && mLen == 0 {
+			break
+		} else if si >= uint(len(src)) {
+			return hasError
+		}
+
+		offset := u16(src[si:])
+		if offset == 0 {
+			return hasError
+		}
+		si += 2
+
+		// Match.
+		mLen += minMatch
+		if mLen == minMatch+0xF {
+			for {
+				x := uint(src[si])
+				if mLen += x; int(mLen) < 0 {
+					return hasError
+				}
+				si++
+				if x != 0xFF {
+					break
+				}
+			}
+		}
+
+		// Copy the match.
+		if di < offset {
+			// The match is beyond our block, meaning the first part
+			// is in the dictionary.
+			fromDict := dict[uint(len(dict))+di-offset:]
+			n := uint(copy(dst[di:di+mLen], fromDict))
+			di += n
+			if mLen -= n; mLen == 0 {
+				continue
+			}
+			// We copied n = offset-di bytes from the dictionary,
+			// then set di = di+n = offset, so the following code
+			// copies from dst[di-offset:] = dst[0:].
+		}
+
+		expanded := dst[di-offset:]
+		if mLen > offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
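+			// The copy source overlaps the destination, so repeatedly
+			// double the already-expanded region (offset, 2*offset,
+			// 4*offset, ...) until only a final partial copy remains.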
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+	}
+
+	return int(di)
+}
+
+func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 0000000..710ea42
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+	ErrInvalidSourceShortBuffer      Error = "lz4: invalid source or destination buffer too short"
+	ErrInvalidFrame                  Error = "lz4: bad magic number"
+	ErrInternalUnhandledState        Error = "lz4: unhandled state"
+	ErrInvalidHeaderChecksum         Error = "lz4: invalid header checksum"
+	ErrInvalidBlockChecksum          Error = "lz4: invalid block checksum"
+	ErrInvalidFrameChecksum          Error = "lz4: invalid frame checksum"
+	ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+	ErrOptionClosedOrError           Error = "lz4: cannot apply options on closed or in error object"
+	ErrOptionInvalidBlockSize        Error = "lz4: invalid block size"
+	ErrOptionNotApplicable           Error = "lz4: option not applicable"
+	ErrWriterNotClosed               Error = "lz4: writer not closed"
+)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 0000000..04aaca8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,348 @@
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
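+// Blocks drives the (de)compression of a frame's data blocks: sequentially
+// through Block when running a single worker, or concurrently through the
+// Blocks channel of per-block result channels.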
+type Blocks struct {
+	Block  *FrameDataBlock
+	Blocks chan chan *FrameDataBlock
+	mu     sync.Mutex
+	err    error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return
+	}
+	b.Block = nil
+	if cap(b.Blocks) != num {
+		b.Blocks = make(chan chan *FrameDataBlock, num)
+	}
+	// goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range b.Blocks {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			block := <-c
+			if block == nil {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Do not attempt to write the block upon any previous failure.
+			if b.err == nil {
+				// Write the block.
+				if err := block.Write(f, dst); err != nil {
+					// Keep the first error.
+					b.err = err
+					// All pending compression goroutines need to shut down, so we need to keep going.
+				}
+			}
+			close(c)
+		}
+	}()
+}
+
+func (b *Blocks) close(f *Frame, num int) error {
+	if num == 1 {
+		if b.Block != nil {
+			b.Block.Close(f)
+		}
+		err := b.err
+		b.err = nil
+		return err
+	}
+	if b.Blocks == nil {
+		err := b.err
+		b.err = nil
+		return err
+	}
+	c := make(chan *FrameDataBlock)
+	b.Blocks <- c
+	c <- nil
+	<-c
+	err := b.err
+	b.err = nil
+	return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks if in concurrent
+// mode and no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+	size := f.Descriptor.Flags.BlockSizeIndex()
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return nil, nil
+	}
+	b.Block = nil
+	blocks := make(chan chan []byte, num)
+	// data receives the uncompressed blocks.
+	data := make(chan []byte)
+	// Read blocks from the source sequentially
+	// and uncompress them concurrently.
+
+	// In legacy mode, accrue the uncompressed sizes in cum.
+	var cum uint32
+	go func() {
+		var cumx uint32
+		var err error
+		for b.ErrorR() == nil {
+			block := NewFrameDataBlock(f)
+			cumx, err = block.Read(f, src, 0)
+			if err != nil {
+				block.Close(f)
+				break
+			}
+			// Recheck for an error as reading may be slow and uncompressing is expensive.
+			if b.ErrorR() != nil {
+				block.Close(f)
+				break
+			}
+			c := make(chan []byte)
+			blocks <- c
+			go func() {
+				defer block.Close(f)
+				data, err := block.Uncompress(f, size.Get(), nil, false)
+				if err != nil {
+					b.closeR(err)
+					// Close the block channel to indicate an error.
+					close(c)
+				} else {
+					c <- data
+				}
+			}()
+		}
+		// End the collection loop and the data channel.
+		c := make(chan []byte)
+		blocks <- c
+		c <- nil // signal the collection loop that we are done
+		<-c      // wait for the collect loop to complete
+		if f.isLegacy() && cum == cumx {
+			err = io.EOF
+		}
+		b.closeR(err)
+		close(data)
+	}()
+	// Collect the uncompressed blocks and make them available
+	// on the returned channel.
+	go func(leg bool) {
+		defer close(blocks)
+		skipBlocks := false
+		for c := range blocks {
+			buf, ok := <-c
+			if !ok {
+				// A closed channel indicates an error.
+				// All remaining channels should be discarded.
+				skipBlocks = true
+				continue
+			}
+			if buf == nil {
+				// Signal to end the loop.
+				close(c)
+				return
+			}
+			if skipBlocks {
+				// A previous error has occurred, skipping remaining channels.
+				continue
+			}
+			// Perform checksum now as the blocks are received in order.
+			if f.Descriptor.Flags.ContentChecksum() {
+				_, _ = f.checksum.Write(buf)
+			}
+			if leg {
+				cum += uint32(len(buf))
+			}
+			data <- buf
+			close(c)
+		}
+	}(f.isLegacy())
+	return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+	b.mu.Lock()
+	if b.err == nil {
+		b.err = err
+	}
+	b.mu.Unlock()
+}
+
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+	buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+	return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+	Size     DataBlockSize
+	Data     []byte // compressed or uncompressed data (.data or .src)
+	Checksum uint32
+	data     []byte // buffer for compressed data
+	src      []byte // uncompressed data
+	err      error  // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+	b.Size = 0
+	b.Checksum = 0
+	b.err = nil
+	if b.data != nil {
+		// Block was not already closed.
+		lz4block.Put(b.data)
+		b.Data = nil
+		b.data = nil
+		b.src = nil
+	}
+}
+
+// Compress compresses src into the block's buffer.
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+	data := b.data
+	if f.isLegacy() {
+		data = data[:cap(data)]
+	} else {
+		data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+	}
+	var n int
+	switch level {
+	case lz4block.Fast:
+		n, _ = lz4block.CompressBlock(src, data)
+	default:
+		n, _ = lz4block.CompressBlockHC(src, data, level)
+	}
+	if n == 0 {
+		b.Size.UncompressedSet(true)
+		b.Data = src
+	} else {
+		b.Size.UncompressedSet(false)
+		b.Data = data[:n]
+	}
+	b.Size.sizeSet(len(b.Data))
+	b.src = src // keep track of the source for content checksum
+
+	if f.Descriptor.Flags.BlockChecksum() {
+		b.Checksum = xxh32.ChecksumZero(b.Data)
+	}
+	return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+	// Write is called in the same order as blocks are compressed,
+	// so content checksum must be done here.
+	if f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(b.src)
+	}
+	buf := f.buf[:]
+	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+	if _, err := dst.Write(buf[:4]); err != nil {
+		return err
+	}
+
+	if _, err := dst.Write(b.Data); err != nil {
+		return err
+	}
+
+	if b.Checksum == 0 {
+		return nil
+	}
+	binary.LittleEndian.PutUint32(buf, b.Checksum)
+	_, err := dst.Write(buf[:4])
+	return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+	x, err := f.readUint32(src)
+	if err != nil {
+		return 0, err
+	}
+	if f.isLegacy() {
+		switch x {
+		case frameMagicLegacy:
+			// Concatenated legacy frame.
+			return b.Read(f, src, cum)
+		case cum:
+			// This only applies in non-concurrent mode; in concurrent mode
+			// it is handled separately.
+			// Linux kernel format appends the total uncompressed size at the end.
+			return 0, io.EOF
+		}
+	} else if x == 0 {
+		// Marker for end of stream.
+		return 0, io.EOF
+	}
+	b.Size = DataBlockSize(x)
+
+	size := b.Size.size()
+	if size > cap(b.data) {
+		return x, lz4errors.ErrOptionInvalidBlockSize
+	}
+	b.data = b.data[:size]
+	if _, err := io.ReadFull(src, b.data); err != nil {
+		return x, err
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		sum, err := f.readUint32(src)
+		if err != nil {
+			return 0, err
+		}
+		b.Checksum = sum
+	}
+	return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
+	if b.Size.Uncompressed() {
+		n := copy(dst, b.data)
+		dst = dst[:n]
+	} else {
+		n, err := lz4block.UncompressBlock(b.data, dst, dict)
+		if err != nil {
+			return nil, err
+		}
+		dst = dst[:n]
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		if c := xxh32.ChecksumZero(b.data); c != b.Checksum {
+			err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+			return nil, err
+		}
+	}
+	if sum && f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(dst)
+	}
+	return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+	if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+		return
+	}
+	x = binary.LittleEndian.Uint32(f.buf[:4])
+	return
+}
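Editor's note: Read consumes one 4-byte little-endian word per block — zero marks the end of the stream, the low 31 bits carry the stored size, and the high bit flags an uncompressed block (the DataBlockSize layout generated in frame_gen.go). A hedged sketch of decoding that word with only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 100 bytes stored uncompressed: the size with the high bit set.
	word := []byte{100, 0, 0, 0x80}
	x := binary.LittleEndian.Uint32(word)
	size := int(x & 0x7FFFFFFF)   // low 31 bits: stored size
	uncompressed := x>>31 != 0    // high bit: uncompressed flag
	fmt.Println(size, uncompressed) // 100 true
}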
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 0000000..18192a9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,204 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+	frameMagic       uint32 = 0x184D2204
+	frameSkipMagic   uint32 = 0x184D2A50
+	frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+	return &Frame{}
+}
+
+type Frame struct {
+	buf        [15]byte // frame descriptor needs at most 4 (magic) + 2 (flags) + 8 (content size) + 1 (checksum) = 15 bytes
+	Magic      uint32
+	Descriptor FrameDescriptor
+	Blocks     Blocks
+	Checksum   uint32
+	checksum   xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+	f.Magic = 0
+	f.Descriptor.Checksum = 0
+	f.Descriptor.ContentSize = 0
+	_ = f.Blocks.close(f, num)
+	f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+	if legacy {
+		f.Magic = frameMagicLegacy
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+	} else {
+		f.Magic = frameMagic
+		f.Descriptor.initW()
+	}
+	f.Blocks.initW(f, dst, num)
+	f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+	if err := f.Blocks.close(f, num); err != nil {
+		return err
+	}
+	if f.isLegacy() {
+		return nil
+	}
+	buf := f.buf[:0]
+	// End mark (data block size of uint32(0)).
+	buf = append(buf, 0, 0, 0, 0)
+	if f.Descriptor.Flags.ContentChecksum() {
+		buf = f.checksum.Sum(buf)
+	}
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (f *Frame) isLegacy() bool {
+	return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) ParseHeaders(src io.Reader) error {
+	if f.Magic > 0 {
+		// Header already read.
+		return nil
+	}
+
+newFrame:
+	var err error
+	if f.Magic, err = f.readUint32(src); err != nil {
+		return err
+	}
+	switch m := f.Magic; {
+	case m == frameMagic || m == frameMagicLegacy:
+	// All 16 values of frameSkipMagic are valid.
+	case m>>8 == frameSkipMagic>>8:
+		skip, err := f.readUint32(src)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+			return err
+		}
+		goto newFrame
+	default:
+		return lz4errors.ErrInvalidFrame
+	}
+	if err := f.Descriptor.initR(f, src); err != nil {
+		return err
+	}
+	f.checksum.Reset()
+	return nil
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+	return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+	if f.isLegacy() {
+		return nil
+	}
+	if !f.Descriptor.Flags.ContentChecksum() {
+		return nil
+	}
+	if f.Checksum, err = f.readUint32(src); err != nil {
+		return err
+	}
+	if c := f.checksum.Sum32(); c != f.Checksum {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+	}
+	return nil
+}
+
+type FrameDescriptor struct {
+	Flags       DescriptorFlags
+	ContentSize uint64
+	Checksum    uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+	fd.Flags.VersionSet(1)
+	fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+	if fd.Checksum > 0 {
+		// Header already written.
+		return nil
+	}
+
+	buf := f.buf[:4]
+	// Write the magic number here even though it belongs to the Frame.
+	binary.LittleEndian.PutUint32(buf, f.Magic)
+	if !f.isLegacy() {
+		buf = buf[:4+2]
+		binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+		if fd.Flags.Size() {
+			buf = buf[:4+2+8]
+			binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+		}
+		fd.Checksum = descriptorChecksum(buf[4:])
+		buf = append(buf, fd.Checksum)
+	}
+
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+	if f.isLegacy() {
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+		return nil
+	}
+	// Read the flags and the checksum, hoping that there is no content size.
+	buf := f.buf[:3]
+	if _, err := io.ReadFull(src, buf); err != nil {
+		return err
+	}
+	descr := binary.LittleEndian.Uint16(buf)
+	fd.Flags = DescriptorFlags(descr)
+	if fd.Flags.Size() {
+		// Append the 8 missing bytes.
+		buf = buf[:3+8]
+		if _, err := io.ReadFull(src, buf[3:]); err != nil {
+			return err
+		}
+		fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+	}
+	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
+	if c := descriptorChecksum(buf); fd.Checksum != c {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+	}
+	// Validate the elements that can be.
+	if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+		return lz4errors.ErrOptionInvalidBlockSize
+	}
+	return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+	return byte(xxh32.ChecksumZero(buf) >> 8)
+}
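Editor's note: ParseHeaders loops back to the newFrame label whenever it meets a skippable frame — the magic is followed by a 4-byte little-endian length, and that many bytes are discarded before reading the next magic. A sketch of just the skip step, assuming the magic word has already been consumed:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
)

func skipFrame(src io.Reader) error {
	var buf [4]byte
	if _, err := io.ReadFull(src, buf[:]); err != nil {
		return err
	}
	skip := binary.LittleEndian.Uint32(buf[:])
	_, err := io.CopyN(ioutil.Discard, src, int64(skip))
	return err
}

func main() {
	// A 3-byte skippable payload followed by other data.
	src := bytes.NewReader([]byte{3, 0, 0, 0, 'x', 'y', 'z', '!'})
	if err := skipFrame(src); err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(src)
	fmt.Printf("%s\n", rest) // !
}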
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 0000000..d33a6be
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+//   field              bits
+//   -----              ----
+//   _                  2
+//   ContentChecksum    1
+//   Size               1
+//   BlockChecksum      1
+//   BlockIndependence  1
+//   Version            2
+//   _                  4
+//   BlockSizeIndex     3
+//   _                  1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool   { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool              { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool     { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16         { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+	return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 2
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+	const b = 1 << 3
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 4
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+	const b = 1 << 5
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+	*x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+	return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+	*x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+	return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+//   field         bits
+//   -----         ----
+//   size          31
+//   Uncompressed  1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int          { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+	*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+	return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+	const b = 1 << 31
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
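Editor's note: these generated accessors pack every descriptor field into a single uint16 with shift-and-mask arithmetic; `*x&^b | b` clears a bit before setting it. A standalone sketch of the same pattern for a 2-bit field at bit 6 and a flag at bit 5 (the flags type here is local to the example):

package main

import "fmt"

type flags uint16

func (x flags) version() uint16   { return uint16(x >> 6 & 0x3) }
func (x flags) independent() bool { return x>>5&1 != 0 }

// versionSet clears the 2-bit field at bit 6, then ors in the new value.
func (x *flags) versionSet(v uint16) { *x = *x&^(0x3<<6) | flags(v)&0x3<<6 }

func (x *flags) independentSet(v bool) {
	const b = 1 << 5
	if v {
		*x |= b
	} else {
		*x &^= b // clear the bit
	}
}

func main() {
	var f flags
	f.versionSet(1)
	f.independentSet(true)
	fmt.Printf("%#x version=%d independent=%v\n", uint16(f), f.version(), f.independent())
	// prints: 0x60 version=1 independent=true
}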
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..651d10c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v        [4]uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v[0] = prime1plus2
+	xxh.v[1] = prime2
+	xxh.v[2] = 0
+	xxh.v[3] = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	var buf *[16]byte
+	if m != 0 {
+		// some data left from previous update
+		buf = &xxh.buf
+		c := copy(buf[m:], input)
+		n -= c
+		input = input[c:]
+	}
+	update(&xxh.v, buf, input)
+	xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+	return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+	if buf != nil {
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+	}
+
+	for ; len(input) >= 16; input = input[16:] {
+		sub := input[:16] // BCE hint for compiler
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+	}
+	v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+	} else {
+		h32 += prime5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime5
+	} else {
+		v1 := prime1plus2
+		v2 := prime2
+		v3 := uint32(0)
+		v4 := prime1minus
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] // BCE hint for compiler
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
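Editor's note: XXHZero buffers partial 16-byte blocks in Write and finishes the tail in Sum32, so streaming writes agree with the one-shot ChecksumZero regardless of how the input is split. A sketch of that property; note xxh32 is an internal package, so this only compiles from within the lz4 module (e.g. in its tests):

package xxh32_test

import (
	"fmt"

	"github.com/pierrec/lz4/v4/internal/xxh32"
)

func Example() {
	data := []byte("hello, xxh32 world")

	var h xxh32.XXHZero
	h.Write(data[:5]) // split across two writes
	h.Write(data[5:])

	fmt.Println(h.Sum32() == xxh32.ChecksumZero(data)) // true
}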
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 0000000..0978b26
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 0000000..c18ffd5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,251 @@
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define p	R0
+#define n	R1
+#define h	R2
+#define v1	R2	// Alias for h.
+#define v2	R3
+#define v3	R4
+#define v4	R5
+#define x1	R6
+#define x2	R7
+#define x3	R8
+#define x4	R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r	R11
+#define prime2r	R12
+#define prime3r	R3	// The rest can alias v{2-4}.
+#define prime4r	R4
+#define prime5r	R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned			\
+	MOVM.IA.W (p), [x1, x2, x3, x4]	\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MULA x2, prime2r, v2, v2	\
+	MULA x3, prime2r, v3, v3	\
+	MULA x4, prime2r, v4, v4	\
+					\
+	MOVW v1 @> 19, v1		\
+	MOVW v2 @> 19, v2		\
+	MOVW v3 @> 19, v3		\
+	MOVW v4 @> 19, v4		\
+					\
+	MUL prime1r, v1			\
+	MUL prime1r, v2			\
+	MUL prime1r, v3			\
+	MUL prime1r, v4			\
+
+#define round16unaligned 		\
+	MOVBU.P  16(p), x1		\
+	MOVBU   -15(p), x2		\
+	ORR     x2 <<  8, x1		\
+	MOVBU   -14(p), x3		\
+	MOVBU   -13(p), x4		\
+	ORR     x4 <<  8, x3		\
+	ORR     x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MOVW v1 @> 19, v1		\
+	MUL prime1r, v1			\
+					\
+	MOVBU -12(p), x1		\
+	MOVBU -11(p), x2		\
+	ORR   x2 <<  8, x1		\
+	MOVBU -10(p), x3		\
+	MOVBU  -9(p), x4		\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v2, v2	\
+	MOVW v2 @> 19, v2		\
+	MUL prime1r, v2			\
+					\
+	MOVBU -8(p), x1			\
+	MOVBU -7(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -6(p), x3			\
+	MOVBU -5(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v3, v3	\
+	MOVW v3 @> 19, v3		\
+	MUL prime1r, v3			\
+					\
+	MOVBU -4(p), x1			\
+	MOVBU -3(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -2(p), x3			\
+	MOVBU -1(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v4, v4	\
+	MOVW v4 @> 19, v4		\
+	MUL prime1r, v4			\
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+	MOVW input_base+0(FP), p
+	MOVW input_len+4(FP),  n
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+	// here, but that's a pseudo-op that generates a load through R11.
+	MOVW $const_prime5, prime5r
+	ADD  prime5r, n, h
+	CMP  $0, n
+	BEQ  end
+
+	// We let n go negative so we can do comparisons with SUB.S
+	// instead of separate CMP.
+	SUB.S $16, n
+	BMI   loop16done
+
+	ADD  prime1r, prime2r, v1
+	MOVW prime2r, v2
+	MOVW $0, v3
+	RSB  $0, prime1r, v4
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   loop16finish
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+loop16finish:
+	MOVW v1 @> 31, h
+	ADD  v2 @> 25, h
+	ADD  v3 @> 20, h
+	ADD  v4 @> 14, h
+
+	// h += len(input) with v2 as temporary.
+	MOVW input_len+4(FP), v2
+	ADD  v2, h
+
+loop16done:
+	ADD $16, n	// Restore number of bytes left.
+
+	SUB.S $4, n
+	MOVW  $const_prime3, prime3r
+	BMI   loop4done
+	MOVW  $const_prime4, prime4r
+
+	TST $3, p
+	BNE loop4unaligned
+
+loop4aligned:
+	SUB.S $4, n
+
+	MOVW.P 4(p), x1
+	MULA   prime3r, x1, h, h
+	MOVW   h @> 15, h
+	MUL    prime4r, h
+
+	BPL loop4aligned
+	B   loop4done
+
+loop4unaligned:
+	SUB.S $4, n
+
+	MOVBU.P  4(p), x1
+	MOVBU   -3(p), x2
+	ORR     x2 <<  8, x1
+	MOVBU   -2(p), x3
+	ORR     x3 << 16, x1
+	MOVBU   -1(p), x4
+	ORR     x4 << 24, x1
+
+	MULA prime3r, x1, h, h
+	MOVW h @> 15, h
+	MUL  prime4r, h
+
+	BPL loop4unaligned
+
+loop4done:
+	ADD.S $4, n	// Restore number of bytes left.
+	BEQ   end
+
+	MOVW $const_prime5, prime5r
+
+loop1:
+	SUB.S $1, n
+
+	MOVBU.P 1(p), x1
+	MULA    prime5r, x1, h, h
+	MOVW    h @> 21, h
+	MUL     prime1r, h
+
+	BNE loop1
+
+end:
+	MOVW $const_prime3, prime3r
+	EOR  h >> 15, h
+	MUL  prime2r, h
+	EOR  h >> 13, h
+	MUL  prime3r, h
+	EOR  h >> 16, h
+
+	MOVW h, ret+12(FP)
+	RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, input []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+	MOVW    v+0(FP), p
+	MOVM.IA (p), [v1, v2, v3, v4]
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Process buf, if not nil.
+	MOVW buf+4(FP), p
+	CMP  $0, p
+	BEQ  noBuffered
+
+	round16aligned
+
+noBuffered:
+	MOVW input_base +8(FP), p
+	MOVW input_len +12(FP), n
+
+	SUB.S $16, n
+	BMI   end
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   end
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+end:
+	MOVW    v+0(FP), p
+	MOVM.IA [v1, v2, v3, v4], (p)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 0000000..c96b59b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+	updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 0000000..a62022e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,157 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+	// Safety checks for duplicated elements.
+	var x [1]struct{}
+	_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+	_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+	_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+	_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+	_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n, i.e. the worst case when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, nil)
+}
+
+// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
+// dictionary, and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, dict)
+}
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+//
+// CompressBlock is equivalent to Compressor.CompressBlock.
+// The final argument is ignored and should be set to nil.
+//
+// Deprecated: use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+	return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A CompressorHC is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+	// Level is the maximum search depth for compression.
+	// Values <= 0 mean no maximum.
+	Level CompressionLevel
+	c     lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// Deprecated: use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+	ErrInvalidFrame = lz4errors.ErrInvalidFrame
+	// ErrInternalUnhandledState is an internal error.
+	ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+	// ErrInvalidHeaderChecksum is returned when reading a frame.
+	ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+	// ErrInvalidBlockChecksum is returned when reading a frame.
+	ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+	// ErrInvalidFrameChecksum is returned when reading a frame.
+	ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+	ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+	ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+	// ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
+	ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+	ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+	ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
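Editor's note: putting the block-level API together — size the destination with CompressBlockBound, compress with a Compressor, and reverse with UncompressBlock into a buffer as large as the original input. A round-trip sketch using only identifiers defined in this file:

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block "), 100)

	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}

	out := make([]byte, len(src)) // must be sized appropriately
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(m == len(src), bytes.Equal(out, src)) // true true
}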
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 0000000..57a44e7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+	applier interface {
+		Apply(...Option) error
+		private()
+	}
+	// Option defines the parameters to set up an LZ4 Writer or Reader.
+	Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+	return o(nil).Error()
+}
+
+// Default options.
+var (
+	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+	DefaultChecksumOption  = ChecksumOption(true)
+	DefaultConcurrency     = ConcurrencyOption(1)
+	defaultOnBlockDone     = OnBlockDoneOption(nil)
+)
+
+const (
+	Block64Kb BlockSize = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockSizeOption(%s)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		case *CompressingReader:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ChecksumOption enables/disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the
+// whole uncompressed data stream.
+func SizeOption(size uint64) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("SizeOption(%d)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+	if n <= 0 {
+		n = runtime.GOMAXPROCS(0)
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.num = n
+			return nil
+		case *Reader:
+			rw.num = n
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// CompressionLevel defines the level of compression to use. The higher the level, the better (but slower) the compression.
+type CompressionLevel uint32
+
+const (
+	Fast   CompressionLevel = 0
+	Level1 CompressionLevel = 1 << (8 + iota)
+	Level2
+	Level3
+	Level4
+	Level5
+	Level6
+	Level7
+	Level8
+	Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+			return lz4errors.Error(s)
+		case *Writer:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		case *CompressingReader:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed;
+// for a Reader, it is when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+	if handler == nil {
+		handler = onBlockDone
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.handler = handler
+			return nil
+		case *Reader:
+			rw.handler = handler
+			return nil
+		case *CompressingReader:
+			rw.handler = handler
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("LegacyOption(%v)", legacy)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.legacy = legacy
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
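Editor's note: each Option is a plain function — applied to a Writer, Reader or CompressingReader it mutates that object, and applied to nil it only renders its own name, which is how Option.String works. A sketch configuring a Writer with several of the options above:

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	w := lz4.NewWriter(&buf)
	err := w.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.ChecksumOption(false),
		lz4.CompressionLevelOption(lz4.Level4),
		lz4.ConcurrencyOption(4),
	)
	fmt.Println(err) // <nil>
	// Applied to nil, an option just names itself:
	// lz4.BlockSizeOption(lz4.Block256Kb).String() == "BlockSizeOption(Block256Kb)"
}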
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 0000000..2de8149
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Block64Kb-65536]
+	_ = x[Block256Kb-262144]
+	_ = x[Block1Mb-1048576]
+	_ = x[Block4Mb-4194304]
+}
+
+const (
+	_BlockSize_name_0 = "Block64Kb"
+	_BlockSize_name_1 = "Block256Kb"
+	_BlockSize_name_2 = "Block1Mb"
+	_BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+	switch {
+	case i == 65536:
+		return _BlockSize_name_0
+	case i == 262144:
+		return _BlockSize_name_1
+	case i == 1048576:
+		return _BlockSize_name_2
+	case i == 4194304:
+		return _BlockSize_name_3
+	default:
+		return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Fast-0]
+	_ = x[Level1-512]
+	_ = x[Level2-1024]
+	_ = x[Level3-2048]
+	_ = x[Level4-4096]
+	_ = x[Level5-8192]
+	_ = x[Level6-16384]
+	_ = x[Level7-32768]
+	_ = x[Level8-65536]
+	_ = x[Level9-131072]
+}
+
+const (
+	_CompressionLevel_name_0 = "Fast"
+	_CompressionLevel_name_1 = "Level1"
+	_CompressionLevel_name_2 = "Level2"
+	_CompressionLevel_name_3 = "Level3"
+	_CompressionLevel_name_4 = "Level4"
+	_CompressionLevel_name_5 = "Level5"
+	_CompressionLevel_name_6 = "Level6"
+	_CompressionLevel_name_7 = "Level7"
+	_CompressionLevel_name_8 = "Level8"
+	_CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+	switch {
+	case i == 0:
+		return _CompressionLevel_name_0
+	case i == 512:
+		return _CompressionLevel_name_1
+	case i == 1024:
+		return _CompressionLevel_name_2
+	case i == 2048:
+		return _CompressionLevel_name_3
+	case i == 4096:
+		return _CompressionLevel_name_4
+	case i == 8192:
+		return _CompressionLevel_name_5
+	case i == 16384:
+		return _CompressionLevel_name_6
+	case i == 32768:
+		return _CompressionLevel_name_7
+	case i == 65536:
+		return _CompressionLevel_name_8
+	case i == 131072:
+		return _CompressionLevel_name_9
+	default:
+		return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 0000000..275daad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var readerStates = []aState{
+	noState:     newState,
+	errorState:  newState,
+	newState:    readState,
+	readState:   closedState,
+	closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+	return newReader(r, false)
+}
+
+func newReader(r io.Reader, legacy bool) *Reader {
+	zr := &Reader{frame: lz4stream.NewFrame()}
+	zr.state.init(readerStates)
+	_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+	zr.Reset(r)
+	return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+	state   _State
+	src     io.Reader        // source reader
+	num     int              // concurrency level
+	frame   *lz4stream.Frame // frame being read
+	data    []byte           // block buffer allocated in non concurrent mode
+	reads   chan []byte      // pending data
+	idx     int              // size of pending data
+	handler func(int)
+	cum     uint32
+	dict    []byte
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case newState:
+	case errorState:
+		return r.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	for _, o := range options {
+		if err = o(r); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+	switch r.state.state {
+	case readState, closedState:
+		if r.frame.Descriptor.Flags.Size() {
+			return int(r.frame.Descriptor.ContentSize)
+		}
+	}
+	return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+	return r.num == 1
+}
+
+func (r *Reader) init() error {
+	err := r.frame.ParseHeaders(r.src)
+	if err != nil {
+		return err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		// We can't decompress dependent blocks concurrently.
+		// Instead of returning an error to the user, silently drop concurrency.
+		r.num = 1
+	}
+	data, err := r.frame.InitR(r.src, r.num)
+	if err != nil {
+		return err
+	}
+	r.reads = data
+	r.idx = 0
+	size := r.frame.Descriptor.Flags.BlockSizeIndex()
+	r.data = size.Get()
+	r.cum = 0
+	return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case readState:
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		// First initialization.
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	for len(buf) > 0 {
+		var bn int
+		if r.idx == 0 {
+			if r.isNotConcurrent() {
+				bn, err = r.read(buf)
+			} else {
+				lz4block.Put(r.data)
+				r.data = <-r.reads
+				if len(r.data) == 0 {
+					// No uncompressed data: something went wrong or we are done.
+					err = r.frame.Blocks.ErrorR()
+				}
+			}
+			switch err {
+			case nil:
+			case io.EOF:
+				if er := r.frame.CloseR(r.src); er != nil {
+					err = er
+				}
+				lz4block.Put(r.data)
+				r.data = nil
+				return
+			default:
+				return
+			}
+		}
+		if bn == 0 {
+			// Fill buf with buffered data.
+			bn = copy(buf, r.data[r.idx:])
+			r.idx += bn
+			if r.idx == len(r.data) {
+				// All data read, get ready for the next Read.
+				r.idx = 0
+			}
+		}
+		buf = buf[bn:]
+		n += bn
+		r.handler(bn)
+	}
+	return
+}
+
+// read uncompresses the next block as follows:
+// - if buf has enough room, the block is uncompressed into it directly
+//   and the length of the used space is returned
+// - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+	block := r.frame.Blocks.Block
+	_, err := block.Read(r.frame, r.src, r.cum)
+	if err != nil {
+		return 0, err
+	}
+	var direct bool
+	dst := r.data[:cap(r.data)]
+	if len(buf) >= len(dst) {
+		// Uncompress directly into buf.
+		direct = true
+		dst = buf
+	}
+	dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+	if err != nil {
+		return 0, err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		if len(r.dict)+len(dst) > 128*1024 {
+			preserveSize := 64*1024 - len(dst)
+			if preserveSize < 0 {
+				preserveSize = 0
+			}
+			r.dict = r.dict[len(r.dict)-preserveSize:]
+		}
+		r.dict = append(r.dict, dst...)
+	}
+	r.cum += uint32(len(dst))
+	if direct {
+		return len(dst), nil
+	}
+	r.data = dst
+	return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+	if r.data != nil {
+		lz4block.Put(r.data)
+		r.data = nil
+	}
+	r.frame.Reset(r.num)
+	r.state.reset()
+	r.src = reader
+	r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	switch r.state.state {
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	defer r.state.nextd(&err)
+
+	var data []byte
+	if r.isNotConcurrent() {
+		size := r.frame.Descriptor.Flags.BlockSizeIndex()
+		data = size.Get()
+		defer lz4block.Put(data)
+	}
+	for {
+		var bn int
+		var dst []byte
+		if r.isNotConcurrent() {
+			bn, err = r.read(data)
+			dst = data[:bn]
+		} else {
+			lz4block.Put(dst)
+			dst = <-r.reads
+			bn = len(dst)
+			if bn == 0 {
+				// No uncompressed data: something went wrong or we are done.
+				err = r.frame.Blocks.ErrorR()
+			}
+		}
+		switch err {
+		case nil:
+		case io.EOF:
+			err = r.frame.CloseR(r.src)
+			return
+		default:
+			return
+		}
+		r.handler(bn)
+		bn, err = w.Write(dst)
+		n += int64(bn)
+		if err != nil {
+			return
+		}
+	}
+}
+
+// ValidFrameHeader returns a bool indicating whether the given byte slice matches an LZ4 frame header.
+func ValidFrameHeader(in []byte) (bool, error) {
+	f := lz4stream.NewFrame()
+	err := f.ParseHeaders(bytes.NewReader(in))
+	if err == nil {
+		return true, nil
+	}
+	if err == lz4errors.ErrInvalidFrame {
+		return false, nil
+	}
+	return false, err
+}
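Editor's note: NewReader parses no headers until the first Read or WriteTo, so ValidFrameHeader is a cheap way to probe bytes before committing to a decode. A round-trip sketch pairing the Reader with the Writer defined later in this diff:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var compressed bytes.Buffer
	w := lz4.NewWriter(&compressed)
	if _, err := io.Copy(w, strings.NewReader("some data to compress")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // writes the end mark
		panic(err)
	}

	ok, _ := lz4.ValidFrameHeader(compressed.Bytes())
	fmt.Println(ok) // true

	var out bytes.Buffer
	if _, err := lz4.NewReader(&compressed).WriteTo(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}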
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 0000000..d94f04d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+	noState     aState = iota // uninitialized reader
+	errorState                // unrecoverable error encountered
+	newState                  // instantiated object
+	readState                 // reading data
+	writeState                // writing data
+	closedState               // all done
+)
+
+type (
+	aState uint8
+	_State struct {
+		states []aState
+		state  aState
+		err    error
+	}
+)
+
+func (s *_State) init(states []aState) {
+	s.states = states
+	s.state = states[0]
+}
+
+func (s *_State) reset() {
+	s.state = s.states[0]
+	s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+	if err != nil {
+		s.err = fmt.Errorf("%s: %w", s.state, err)
+		s.state = errorState
+		return true
+	}
+	s.state = s.states[s.state]
+	return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+	return errp != nil && s.next(*errp)
+}
+
+// check sets s to the error state if it is not already in error and the error is neither nil nor io.EOF.
+func (s *_State) check(errp *error) {
+	if s.state == errorState || errp == nil {
+		return
+	}
+	if err := *errp; err != nil {
+		s.err = fmt.Errorf("%w[%s]", err, s.state)
+		if !errors.Is(err, io.EOF) {
+			s.state = errorState
+		}
+	}
+}
+
+func (s *_State) fail() error {
+	s.state = errorState
+	s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+	return s.err
+}
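Editor's note: each transition table maps the current state to its successor, so next is just an indexed lookup (s.state = s.states[s.state]) with errors diverted to errorState. A toy version of the same table-driven machine:

package main

import "fmt"

type state uint8

const (
	idle state = iota
	running
	done
)

// next[s] is the successor of s on a successful step.
var next = []state{
	idle:    running,
	running: done,
	done:    idle,
}

func main() {
	s := idle
	for i := 0; i < 4; i++ {
		fmt.Println(s)
		s = next[s] // table-driven transition
	}
	// prints 0 1 2 0 (idle running done idle)
}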
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 0000000..75fb828
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[noState-0]
+	_ = x[errorState-1]
+	_ = x[newState-2]
+	_ = x[readState-3]
+	_ = x[writeState-4]
+	_ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+	if i >= aState(len(_aState_index)-1) {
+		return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 0000000..4358ade
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var writerStates = []aState{
+	noState:     newState,
+	newState:    writeState,
+	writeState:  closedState,
+	closedState: newState,
+	errorState:  newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+	zw := &Writer{frame: lz4stream.NewFrame()}
+	zw.state.init(writerStates)
+	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+	zw.Reset(w)
+	return zw
+}
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+	state   _State
+	src     io.Writer                 // destination writer
+	level   lz4block.CompressionLevel // how hard to try
+	num     int                       // concurrency level
+	frame   *lz4stream.Frame          // frame being built
+	data    []byte                    // pending data
+	idx     int                       // size of pending data
+	handler func(int)
+	legacy  bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case newState:
+	case errorState:
+		return w.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	w.Reset(w.src)
+	for _, o := range options {
+		if err = o(w); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+	return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+	w.frame.InitW(w.src, w.num, w.legacy)
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	w.data = size.Get()
+	w.idx = 0
+	return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case writeState:
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+
+	zn := len(w.data)
+	for len(buf) > 0 {
+		if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err = w.write(buf[:zn], false); err != nil {
+				return
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(w.data[w.idx:], buf)
+		n += m
+		w.idx += m
+		buf = buf[m:]
+
+		if w.idx < len(w.data) {
+			// Buffer not filled.
+			return
+		}
+
+		// Buffer full.
+		if err = w.write(w.data, true); err != nil {
+			return
+		}
+		if !w.isNotConcurrent() {
+			size := w.frame.Descriptor.Flags.BlockSizeIndex()
+			w.data = size.Get()
+		}
+		w.idx = 0
+	}
+	return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+	if w.isNotConcurrent() {
+		block := w.frame.Blocks.Block
+		err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+		w.handler(len(block.Data))
+		return err
+	}
+	c := make(chan *lz4stream.FrameDataBlock)
+	w.frame.Blocks.Blocks <- c
+	go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+		b := lz4stream.NewFrameDataBlock(w.frame)
+		c <- b.Compress(w.frame, data, w.level)
+		<-c
+		w.handler(len(b.Data))
+		b.Close(w.frame)
+		if safe {
+			// Safe to put it back: its last use was FrameDataBlock.Write(), called before c is closed.
+			lz4block.Put(data)
+		}
+	}(c, data, safe)
+
+	return nil
+}
+
+// Flush flushes any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+	switch w.state.state {
+	case writeState:
+	case errorState:
+		return w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return nil
+	}
+
+	if w.idx > 0 {
+		// Flush pending data, disable w.data freeing as it is done later on.
+		if err = w.write(w.data[:w.idx], false); err != nil {
+			return err
+		}
+		w.idx = 0
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	err := w.frame.CloseW(w.src, w.num)
+	// It is now safe to free the buffer.
+	if w.data != nil {
+		lz4block.Put(w.data)
+		w.data = nil
+	}
+	return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previous options unless overwritten by the supplied ones.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+	w.frame.Reset(w.num)
+	w.state.reset()
+	w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer's destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	switch w.state.state {
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+	defer w.state.check(&err)
+
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	var done bool
+	var rn int
+	data := size.Get()
+	if w.isNotConcurrent() {
+		// Keep the same buffer for the whole process.
+		defer lz4block.Put(data)
+	}
+	for !done {
+		rn, err = io.ReadFull(r, data)
+		switch err {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			done = true
+		default:
+			return
+		}
+		n += int64(rn)
+		err = w.write(data[:rn], true)
+		if err != nil {
+			return
+		}
+		w.handler(rn)
+		if !done && !w.isNotConcurrent() {
+			// The buffer will be returned automatically by the goroutines (safe=true),
+			// so get a new one for the next round.
+			data = size.Get()
+		}
+	}
+	return
+}
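Editor's note: ReadFrom drives the whole compression loop from an io.Reader — it fills one block-sized buffer per round, compresses it via write, and in concurrent mode hands buffer recycling to the worker goroutines. A usage sketch with an in-memory source:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var dst bytes.Buffer
	w := lz4.NewWriter(&dst)

	n, err := w.ReadFrom(strings.NewReader(strings.Repeat("stream me ", 1000)))
	if err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes and writes the end mark
		panic(err)
	}
	fmt.Println(n, dst.Len()) // 10000 bytes in, far fewer compressed bytes out
}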