[VOL-5486] Fix deprecated versions

Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go
new file mode 100644
index 0000000..773175d
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go
new file mode 100644
index 0000000..9f27d91
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go
new file mode 100644
index 0000000..773175d
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go
new file mode 100644
index 0000000..9022f6b
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go
@@ -0,0 +1,9 @@
+//go:build arm64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go
new file mode 100644
index 0000000..3127752
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go
@@ -0,0 +1,9 @@
+//go:build loong64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go
new file mode 100644
index 0000000..d930f4e
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go
@@ -0,0 +1,9 @@
+//go:build mips64 || mips64le
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x8000000000 // 512GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go
new file mode 100644
index 0000000..8b19343
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go
@@ -0,0 +1,9 @@
+//go:build mips || mipsle
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x40000000 // 1GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go
new file mode 100644
index 0000000..a374e14
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go
@@ -0,0 +1,9 @@
+//go:build ppc
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go
new file mode 100644
index 0000000..80288a8
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go
@@ -0,0 +1,9 @@
+//go:build ppc64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go
new file mode 100644
index 0000000..77561d6
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go
@@ -0,0 +1,9 @@
+//go:build ppc64le
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go
new file mode 100644
index 0000000..2a876e5
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go
@@ -0,0 +1,9 @@
+//go:build riscv64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go
new file mode 100644
index 0000000..982cb75
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go
@@ -0,0 +1,9 @@
+//go:build s390x
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
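
These per-architecture files pin the mmap ceiling and the maximum array size used for unsafe slicing. As a rough illustration of how such a ceiling is enforced, here is a minimal standalone sketch (not bbolt's actual mmapSize implementation, which also doubles small sizes and aligns to the page size) that steps a requested size up in 1GB increments and rejects anything above the cap:

```go
package main

import "fmt"

const (
	maxMapSize  = 0xFFFFFFFFFFFF // 256TB, the 64-bit value above
	maxMmapStep = 1 << 30        // 1GB, mirrors common.MaxMmapStep
)

// mmapSize is an illustrative sketch only: round the requested size up
// to the next 1GB step, then refuse anything beyond maxMapSize.
func mmapSize(size int64) (int64, error) {
	if remainder := size % maxMmapStep; remainder > 0 {
		size += maxMmapStep - remainder
	}
	if size > maxMapSize {
		return 0, fmt.Errorf("mmap too large: %d > %d", size, int64(maxMapSize))
	}
	return size, nil
}

func main() {
	sz, err := mmapSize(3 << 29) // a 1.5GB request rounds up to 2GB
	fmt.Println(sz, err)         // 2147483648 <nil>
}
```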
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go
new file mode 100644
index 0000000..2b4ab14
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go
@@ -0,0 +1,54 @@
+package common
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+const BucketHeaderSize = int(unsafe.Sizeof(InBucket{}))
+
+// InBucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type InBucket struct {
+	root     Pgid   // page id of the bucket's root-level page
+	sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+func NewInBucket(root Pgid, seq uint64) InBucket {
+	return InBucket{
+		root:     root,
+		sequence: seq,
+	}
+}
+
+func (b *InBucket) RootPage() Pgid {
+	return b.root
+}
+
+func (b *InBucket) SetRootPage(id Pgid) {
+	b.root = id
+}
+
+// InSequence returns the sequence. It is not named `Sequence` to avoid
+// a name clash with `(*Bucket) Sequence()`.
+func (b *InBucket) InSequence() uint64 {
+	return b.sequence
+}
+
+func (b *InBucket) SetInSequence(v uint64) {
+	b.sequence = v
+}
+
+func (b *InBucket) IncSequence() {
+	b.sequence++
+}
+
+func (b *InBucket) InlinePage(v []byte) *Page {
+	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
+}
+
+func (b *InBucket) String() string {
+	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
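
The InBucket comment above describes the inline-bucket layout: the 16-byte header is the bucket's stored value, and for inline buckets (root == 0) a serialized page follows it directly, which is what InlinePage reinterprets. A small standalone sketch of that layout, using mirror types for illustration only:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Mirror of the vendored InBucket, for illustration only.
type pgid uint64

type inBucket struct {
	root     pgid // 0 for inline buckets
	sequence uint64
}

func main() {
	// BucketHeaderSize in bucket.go is unsafe.Sizeof(InBucket{}):
	fmt.Println(unsafe.Sizeof(inBucket{})) // 16

	// An inline bucket's value is the header followed by page bytes;
	// InlinePage simply takes &value[BucketHeaderSize] as a *Page.
	value := make([]byte, 64) // header + (zeroed) inline page bytes
	header := (*inBucket)(unsafe.Pointer(&value[0]))
	fmt.Println(header.root == 0) // true: this bucket is inline
}
```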
diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go
new file mode 100644
index 0000000..080b9af
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go
@@ -0,0 +1,115 @@
+package common
+
+import "unsafe"
+
+// Inode represents an internal node inside a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type Inode struct {
+	flags uint32
+	pgid  Pgid
+	key   []byte
+	value []byte
+}
+
+type Inodes []Inode
+
+func (in *Inode) Flags() uint32 {
+	return in.flags
+}
+
+func (in *Inode) SetFlags(flags uint32) {
+	in.flags = flags
+}
+
+func (in *Inode) Pgid() Pgid {
+	return in.pgid
+}
+
+func (in *Inode) SetPgid(id Pgid) {
+	in.pgid = id
+}
+
+func (in *Inode) Key() []byte {
+	return in.key
+}
+
+func (in *Inode) SetKey(key []byte) {
+	in.key = key
+}
+
+func (in *Inode) Value() []byte {
+	return in.value
+}
+
+func (in *Inode) SetValue(value []byte) {
+	in.value = value
+}
+
+func ReadInodeFromPage(p *Page) Inodes {
+	inodes := make(Inodes, int(p.Count()))
+	isLeaf := p.IsLeafPage()
+	for i := 0; i < int(p.Count()); i++ {
+		inode := &inodes[i]
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			inode.SetFlags(elem.Flags())
+			inode.SetKey(elem.Key())
+			inode.SetValue(elem.Value())
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			inode.SetPgid(elem.Pgid())
+			inode.SetKey(elem.Key())
+		}
+		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
+	}
+
+	return inodes
+}
+
+func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
+	// Loop over each item and write it to the page.
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+	isLeaf := p.IsLeafPage()
+	for i, item := range inodes {
+		Assert(len(item.Key()) > 0, "write: zero-length inode key")
+
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.Key()) + len(item.Value())
+		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
+		// Write the page element.
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+			elem.SetFlags(item.Flags())
+			elem.SetKsize(uint32(len(item.Key())))
+			elem.SetVsize(uint32(len(item.Value())))
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+			elem.SetKsize(uint32(len(item.Key())))
+			elem.SetPgid(item.Pgid())
+			Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred")
+		}
+
+		// Write data for the element to the end of the page.
+		l := copy(b, item.Key())
+		copy(b[l:], item.Value())
+	}
+
+	return uint32(off)
+}
+
+func UsedSpaceInPage(inodes Inodes, p *Page) uint32 {
+	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+	for _, item := range inodes {
+		sz := len(item.Key()) + len(item.Value())
+		off += uintptr(sz)
+	}
+
+	return uint32(off)
+}
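
WriteInodeToPage lays a page out as: page header, then one fixed-size element header per inode, then the raw key/value bytes, with each element's pos recorded relative to the element itself. UsedSpaceInPage computes the same total without writing anything. A standalone mirror of that arithmetic, assuming the leaf-page sizes implied by page.go (16-byte page header, 16-byte leaf elements):

```go
package main

import "fmt"

// Sizes assumed from page.go: Page is {Pgid, uint16, uint16, uint32}
// (16 bytes) and leafPageElement is four uint32s (16 bytes).
const (
	pageHeaderSize      = 16
	leafPageElementSize = 16
)

type kv struct{ key, value []byte }

// usedSpace mirrors UsedSpaceInPage: header, one element per item,
// then the raw key/value bytes.
func usedSpace(items []kv) int {
	off := pageHeaderSize + leafPageElementSize*len(items)
	for _, it := range items {
		off += len(it.key) + len(it.value)
	}
	return off
}

func main() {
	items := []kv{
		{[]byte("alpha"), []byte("1")},
		{[]byte("beta"), []byte("22")},
	}
	// 16 + 2*16 + (5+1) + (4+2) = 60 bytes used on the page.
	fmt.Println(usedSpace(items))
}
```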
diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go
new file mode 100644
index 0000000..0553886
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go
@@ -0,0 +1,161 @@
+package common
+
+import (
+	"fmt"
+	"hash/fnv"
+	"io"
+	"unsafe"
+
+	"go.etcd.io/bbolt/errors"
+)
+
+type Meta struct {
+	magic    uint32
+	version  uint32
+	pageSize uint32
+	flags    uint32
+	root     InBucket
+	freelist Pgid
+	pgid     Pgid
+	txid     Txid
+	checksum uint64
+}
+
+// Validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *Meta) Validate() error {
+	if m.magic != Magic {
+		return errors.ErrInvalid
+	} else if m.version != Version {
+		return errors.ErrVersionMismatch
+	} else if m.checksum != m.Sum64() {
+		return errors.ErrChecksum
+	}
+	return nil
+}
+
+// Copy copies one meta object to another.
+func (m *Meta) Copy(dest *Meta) {
+	*dest = *m
+}
+
+// Write writes the meta onto a page.
+func (m *Meta) Write(p *Page) {
+	if m.root.root >= m.pgid {
+		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+	} else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist {
+		// TODO: reject pgidNoFreeList if !NoFreelistSync
+		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+	}
+
+	// Page id is either going to be 0 or 1, which we can determine from the transaction ID.
+	p.id = Pgid(m.txid % 2)
+	p.SetFlags(MetaPageFlag)
+
+	// Calculate the checksum.
+	m.checksum = m.Sum64()
+
+	m.Copy(p.Meta())
+}
+
+// Sum64 generates the checksum for the meta.
+func (m *Meta) Sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+func (m *Meta) Magic() uint32 {
+	return m.magic
+}
+
+func (m *Meta) SetMagic(v uint32) {
+	m.magic = v
+}
+
+func (m *Meta) Version() uint32 {
+	return m.version
+}
+
+func (m *Meta) SetVersion(v uint32) {
+	m.version = v
+}
+
+func (m *Meta) PageSize() uint32 {
+	return m.pageSize
+}
+
+func (m *Meta) SetPageSize(v uint32) {
+	m.pageSize = v
+}
+
+func (m *Meta) Flags() uint32 {
+	return m.flags
+}
+
+func (m *Meta) SetFlags(v uint32) {
+	m.flags = v
+}
+
+func (m *Meta) SetRootBucket(b InBucket) {
+	m.root = b
+}
+
+func (m *Meta) RootBucket() *InBucket {
+	return &m.root
+}
+
+func (m *Meta) Freelist() Pgid {
+	return m.freelist
+}
+
+func (m *Meta) SetFreelist(v Pgid) {
+	m.freelist = v
+}
+
+func (m *Meta) IsFreelistPersisted() bool {
+	return m.freelist != PgidNoFreelist
+}
+
+func (m *Meta) Pgid() Pgid {
+	return m.pgid
+}
+
+func (m *Meta) SetPgid(id Pgid) {
+	m.pgid = id
+}
+
+func (m *Meta) Txid() Txid {
+	return m.txid
+}
+
+func (m *Meta) SetTxid(id Txid) {
+	m.txid = id
+}
+
+func (m *Meta) IncTxid() {
+	m.txid += 1
+}
+
+func (m *Meta) DecTxid() {
+	m.txid -= 1
+}
+
+func (m *Meta) Checksum() uint64 {
+	return m.checksum
+}
+
+func (m *Meta) SetChecksum(v uint64) {
+	m.checksum = v
+}
+
+func (m *Meta) Print(w io.Writer) {
+	fmt.Fprintf(w, "Version:    %d\n", m.version)
+	fmt.Fprintf(w, "Page Size:  %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags:      %08x\n", m.flags)
+	fmt.Fprintf(w, "Root:       <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist:   <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM:        <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID:     %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum:   %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+}
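
Sum64 hashes every Meta field that precedes the checksum by reinterpreting the struct as a byte array of length unsafe.Offsetof(Meta{}.checksum). A standalone sketch of the same pattern on a smaller, purely illustrative struct:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"unsafe"
)

// Illustrative struct; the vendored Meta has more fields, but the
// checksum is likewise the last field so Offsetof covers the rest.
type meta struct {
	magic    uint32
	version  uint32
	txid     uint64
	checksum uint64
}

// sum64 mirrors Meta.Sum64: FNV-64a over the bytes before checksum.
func (m *meta) sum64() uint64 {
	h := fnv.New64a()
	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
	return h.Sum64()
}

func main() {
	m := &meta{magic: 0xED0CDAED, version: 2, txid: 7}
	m.checksum = m.sum64()
	// Validate recomputes the prefix hash and compares it:
	fmt.Println(m.checksum == m.sum64()) // true
}
```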
diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go
new file mode 100644
index 0000000..ee80896
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/page.go
@@ -0,0 +1,391 @@
+package common
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"unsafe"
+)
+
+const PageHeaderSize = unsafe.Sizeof(Page{})
+
+const MinKeysPerPage = 2
+
+const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
+const pgidSize = unsafe.Sizeof(Pgid(0))
+
+const (
+	BranchPageFlag   = 0x01
+	LeafPageFlag     = 0x02
+	MetaPageFlag     = 0x04
+	FreelistPageFlag = 0x10
+)
+
+const (
+	BucketLeafFlag = 0x01
+)
+
+type Pgid uint64
+
+type Page struct {
+	id       Pgid
+	flags    uint16
+	count    uint16
+	overflow uint32
+}
+
+func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
+	return &Page{
+		id:       id,
+		flags:    flags,
+		count:    count,
+		overflow: overflow,
+	}
+}
+
+// Typ returns a human-readable page type string used for debugging.
+func (p *Page) Typ() string {
+	if p.IsBranchPage() {
+		return "branch"
+	} else if p.IsLeafPage() {
+		return "leaf"
+	} else if p.IsMetaPage() {
+		return "meta"
+	} else if p.IsFreelistPage() {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) IsBranchPage() bool {
+	return p.flags == BranchPageFlag
+}
+
+func (p *Page) IsLeafPage() bool {
+	return p.flags == LeafPageFlag
+}
+
+func (p *Page) IsMetaPage() bool {
+	return p.flags == MetaPageFlag
+}
+
+func (p *Page) IsFreelistPage() bool {
+	return p.flags == FreelistPageFlag
+}
+
+// Meta returns a pointer to the metadata section of the page.
+func (p *Page) Meta() *Meta {
+	return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+}
+
+func (p *Page) FastCheck(id Pgid) {
+	Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
+	// Only one flag of page-type can be set.
+	Assert(p.IsBranchPage() ||
+		p.IsLeafPage() ||
+		p.IsMetaPage() ||
+		p.IsFreelistPage(),
+		"page %v: has unexpected type/flags: %x", p.id, p.flags)
+}
+
+// LeafPageElement retrieves the leaf node by index
+func (p *Page) LeafPageElement(index uint16) *leafPageElement {
+	return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		LeafPageElementSize, int(index)))
+}
+
+// LeafPageElements retrieves a list of leaf nodes.
+func (p *Page) LeafPageElements() []leafPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	elems := unsafe.Slice((*leafPageElement)(data), int(p.count))
+	return elems
+}
+
+// BranchPageElement retrieves the branch node by index
+func (p *Page) BranchPageElement(index uint16) *branchPageElement {
+	return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		unsafe.Sizeof(branchPageElement{}), int(index)))
+}
+
+// BranchPageElements retrieves a list of branch nodes.
+func (p *Page) BranchPageElements() []branchPageElement {
+	if p.count == 0 {
+		return nil
+	}
+	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	elems := unsafe.Slice((*branchPageElement)(data), int(p.count))
+	return elems
+}
+
+func (p *Page) FreelistPageCount() (int, int) {
+	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags))
+
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	var idx, count = 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+		count = int(c)
+		if count < 0 {
+			panic(fmt.Sprintf("leading element count %d overflows int", c))
+		}
+	}
+
+	return idx, count
+}
+
+func (p *Page) FreelistPageIds() []Pgid {
+	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags))
+
+	idx, count := p.FreelistPageCount()
+
+	if count == 0 {
+		return nil
+	}
+
+	data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx)
+	ids := unsafe.Slice((*Pgid)(data), count)
+
+	return ids
+}
+
+// hexdump writes n bytes of the page to STDERR as hex output.
+func (p *Page) hexdump(n int) {
+	buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
+	fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+func (p *Page) PageElementSize() uintptr {
+	if p.IsLeafPage() {
+		return LeafPageElementSize
+	}
+	return BranchPageElementSize
+}
+
+func (p *Page) Id() Pgid {
+	return p.id
+}
+
+func (p *Page) SetId(target Pgid) {
+	p.id = target
+}
+
+func (p *Page) Flags() uint16 {
+	return p.flags
+}
+
+func (p *Page) SetFlags(v uint16) {
+	p.flags = v
+}
+
+func (p *Page) Count() uint16 {
+	return p.count
+}
+
+func (p *Page) SetCount(target uint16) {
+	p.count = target
+}
+
+func (p *Page) Overflow() uint32 {
+	return p.overflow
+}
+
+func (p *Page) SetOverflow(target uint32) {
+	p.overflow = target
+}
+
+func (p *Page) String() string {
+	return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow)
+}
+
+type Pages []*Page
+
+func (s Pages) Len() int           { return len(s) }
+func (s Pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+	pos   uint32
+	ksize uint32
+	pgid  Pgid
+}
+
+func (n *branchPageElement) Pos() uint32 {
+	return n.pos
+}
+
+func (n *branchPageElement) SetPos(v uint32) {
+	n.pos = v
+}
+
+func (n *branchPageElement) Ksize() uint32 {
+	return n.ksize
+}
+
+func (n *branchPageElement) SetKsize(v uint32) {
+	n.ksize = v
+}
+
+func (n *branchPageElement) Pgid() Pgid {
+	return n.pgid
+}
+
+func (n *branchPageElement) SetPgid(v Pgid) {
+	n.pgid = v
+}
+
+// Key returns a byte slice of the node key.
+func (n *branchPageElement) Key() []byte {
+	return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+	flags uint32
+	pos   uint32
+	ksize uint32
+	vsize uint32
+}
+
+func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement {
+	return &leafPageElement{
+		flags: flags,
+		pos:   pos,
+		ksize: ksize,
+		vsize: vsize,
+	}
+}
+
+func (n *leafPageElement) Flags() uint32 {
+	return n.flags
+}
+
+func (n *leafPageElement) SetFlags(v uint32) {
+	n.flags = v
+}
+
+func (n *leafPageElement) Pos() uint32 {
+	return n.pos
+}
+
+func (n *leafPageElement) SetPos(v uint32) {
+	n.pos = v
+}
+
+func (n *leafPageElement) Ksize() uint32 {
+	return n.ksize
+}
+
+func (n *leafPageElement) SetKsize(v uint32) {
+	n.ksize = v
+}
+
+func (n *leafPageElement) Vsize() uint32 {
+	return n.vsize
+}
+
+func (n *leafPageElement) SetVsize(v uint32) {
+	n.vsize = v
+}
+
+// Key returns a byte slice of the node key.
+func (n *leafPageElement) Key() []byte {
+	i := int(n.pos)
+	j := i + int(n.ksize)
+	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
+
+// Value returns a byte slice of the node value.
+func (n *leafPageElement) Value() []byte {
+	i := int(n.pos) + int(n.ksize)
+	j := i + int(n.vsize)
+	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
+
+func (n *leafPageElement) IsBucketEntry() bool {
+	return n.flags&uint32(BucketLeafFlag) != 0
+}
+
+func (n *leafPageElement) Bucket() *InBucket {
+	if n.IsBucketEntry() {
+		return LoadBucket(n.Value())
+	} else {
+		return nil
+	}
+}
+
+// PageInfo represents human-readable information about a page.
+type PageInfo struct {
+	ID            int
+	Type          string
+	Count         int
+	OverflowCount int
+}
+
+type Pgids []Pgid
+
+func (s Pgids) Len() int           { return len(s) }
+func (s Pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s Pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// Merge returns the sorted union of a and b.
+func (a Pgids) Merge(b Pgids) Pgids {
+	// Return the other slice if one is empty.
+	if len(a) == 0 {
+		return b
+	}
+	if len(b) == 0 {
+		return a
+	}
+	merged := make(Pgids, len(a)+len(b))
+	Mergepgids(merged, a, b)
+	return merged
+}
+
+// Mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func Mergepgids(dst, a, b Pgids) {
+	if len(dst) < len(a)+len(b) {
+		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+	}
+	// Copy in the other slice if one is empty.
+	if len(a) == 0 {
+		copy(dst, b)
+		return
+	}
+	if len(b) == 0 {
+		copy(dst, a)
+		return
+	}
+
+	// Merged will hold all elements from both lists.
+	merged := dst[:0]
+
+	// Assign lead to the slice with a lower starting value, follow to the higher value.
+	lead, follow := a, b
+	if b[0] < a[0] {
+		lead, follow = b, a
+	}
+
+	// Continue while there are elements in the lead.
+	for len(lead) > 0 {
+		// Merge largest prefix of lead that is ahead of follow[0].
+		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+		merged = append(merged, lead[:n]...)
+		if n >= len(lead) {
+			break
+		}
+
+		// Swap lead and follow.
+		lead, follow = follow, lead[n:]
+	}
+
+	// Append what's left in follow.
+	_ = append(merged, follow...)
+}
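
Mergepgids merges two sorted id lists by leapfrogging: sort.Search finds the longest prefix of the current lead that does not pass follow[0], that prefix is appended, and the roles swap. The final append writes the tail into dst's backing array, which is why its result can be discarded. A standalone version of the same loop over plain uint64 slices:

```go
package main

import (
	"fmt"
	"sort"
)

// merge mirrors Mergepgids: dst must already have len(a)+len(b).
func merge(dst, a, b []uint64) {
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}
	merged := dst[:0]
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Longest prefix of lead that stays <= follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:]
	}
	_ = append(merged, follow...) // writes into dst's backing array
}

func main() {
	a := []uint64{3, 4, 9}
	b := []uint64{2, 7, 13}
	dst := make([]uint64, len(a)+len(b))
	merge(dst, a, b)
	fmt.Println(dst) // [2 3 4 7 9 13]
}
```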
diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go
new file mode 100644
index 0000000..18d6d69
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/types.go
@@ -0,0 +1,37 @@
+package common
+
+import (
+	"os"
+	"runtime"
+	"time"
+)
+
+// MaxMmapStep is the largest step that can be taken when remapping the mmap.
+const MaxMmapStep = 1 << 30 // 1GB
+
+// Version represents the data file format version.
+const Version uint32 = 2
+
+// Magic represents a marker value to indicate that a file is a Bolt DB.
+const Magic uint32 = 0xED0CDAED
+
+const PgidNoFreelist Pgid = 0xffffffffffffffff
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file.  This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+	DefaultMaxBatchSize  int = 1000
+	DefaultMaxBatchDelay     = 10 * time.Millisecond
+	DefaultAllocSize         = 16 * 1024 * 1024
+)
+
+// DefaultPageSize is the default page size for the db, which is set to the OS page size.
+var DefaultPageSize = os.Getpagesize()
+
+// Txid represents the internal transaction identifier.
+type Txid uint64
diff --git a/vendor/go.etcd.io/bbolt/internal/common/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
new file mode 100644
index 0000000..740ffc7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
@@ -0,0 +1,27 @@
+package common
+
+import (
+	"unsafe"
+)
+
+func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+	//
+	// This memory is not allocated from C, but it is unmanaged by Go's
+	// garbage collector and should behave similarly, and the compiler
+	// should produce similar code.  Note that this conversion allows a
+	// subslice to begin after the base address, with an optional offset,
+	// while the URL above does not cover this case and only slices from
+	// index 0.  However, the wiki never says that the address must be to
+	// the beginning of a C allocation (or even that malloc was used at
+	// all), so this is believed to be correct.
+	return (*[MaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
+}
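
UnsafeByteSlice is the cgo-style trick the comment describes: cast a pointer to a huge fixed-size array type, then slice it with explicit bounds and capacity. A standalone copy of the helpers, shown slicing a sub-range of a buffer the way page.go extracts keys and values relative to an element:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Standalone copies of the vendored helpers, for illustration only.
func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// 0x7FFFFFFF matches the 64-bit MaxAllocSize above; the array type
	// is never allocated, it only bounds the reinterpreting slice.
	const maxAllocSize = 0x7FFFFFFF
	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

func main() {
	buf := []byte("headerKEYvalue")
	// View bytes [6,9) relative to the start of buf: the "key".
	key := unsafeByteSlice(unsafe.Pointer(&buf[0]), 0, 6, 9)
	fmt.Println(string(key)) // KEY
}
```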
diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go
new file mode 100644
index 0000000..bdf82a7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go
@@ -0,0 +1,64 @@
+package common
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"unsafe"
+)
+
+func LoadBucket(buf []byte) *InBucket {
+	return (*InBucket)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPage(buf []byte) *Page {
+	return (*Page)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPageMeta(buf []byte) *Meta {
+	return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+}
+
+func CopyFile(srcPath, dstPath string) error {
+	// Ensure source file exists.
+	_, err := os.Stat(srcPath)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("source file %q not found", srcPath)
+	} else if err != nil {
+		return err
+	}
+
+	// Ensure the output file does not exist.
+	_, err = os.Stat(dstPath)
+	if err == nil {
+		return fmt.Errorf("output file %q already exists", dstPath)
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	srcDB, err := os.Open(srcPath)
+	if err != nil {
+		return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
+	}
+	defer srcDB.Close()
+	dstDB, err := os.Create(dstPath)
+	if err != nil {
+		return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
+	}
+	defer dstDB.Close()
+	written, err := io.Copy(dstDB, srcDB)
+	if err != nil {
+		return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
+	}
+
+	srcFi, err := srcDB.Stat()
+	if err != nil {
+		return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
+	}
+	initialSize := srcFi.Size()
+	if initialSize != written {
+		return fmt.Errorf("the number of bytes copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
+	}
+
+	return nil
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go
new file mode 100644
index 0000000..eac95e2
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go
@@ -0,0 +1,67 @@
+// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go
+package common
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+const ENV_VERIFY = "BBOLT_VERIFY"
+
+type VerificationType string
+
+const (
+	ENV_VERIFY_VALUE_ALL    VerificationType = "all"
+	ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+	return strings.ToLower(os.Getenv(ENV_VERIFY))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+	env := getEnvVerify()
+	return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+	previousEnv := getEnvVerify()
+	os.Setenv(ENV_VERIFY, string(verification))
+	return func() {
+		os.Setenv(ENV_VERIFY, previousEnv)
+	}
+}
+
+// EnableAllVerifications enables all verifications and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+	return EnableVerifications(ENV_VERIFY_VALUE_ALL)
+}
+
+// DisableVerifications unsets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+	previousEnv := getEnvVerify()
+	os.Unsetenv(ENV_VERIFY)
+	return func() {
+		os.Setenv(ENV_VERIFY, previousEnv)
+	}
+}
+
+// Verify runs the given function only if verification is enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+	if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
+		f()
+	}
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
+func Assert(condition bool, msg string, v ...any) {
+	if !condition {
+		panic(fmt.Sprintf("assertion failed: "+msg, v...))
+	}
+}
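
These checks are env-gated: Verify bodies run only when BBOLT_VERIFY selects them, so expensive invariants cost nothing in production. A hypothetical sketch of the intended usage; since internal/common is not importable outside the bbolt module, picture this inside one of bbolt's own tests:

```go
package common_test // hypothetical: only code inside the bbolt module can import internal/common

import (
	"testing"

	"go.etcd.io/bbolt/internal/common"
)

// TestExpensiveInvariant is an illustrative test name, not one that
// exists in bbolt: enable verification for the test's duration and
// wrap costly invariant checks in common.Verify.
func TestExpensiveInvariant(t *testing.T) {
	restore := common.EnableAllVerifications() // sets BBOLT_VERIFY=all
	defer restore()                            // puts the old value back

	common.Verify(func() {
		// Runs only because verification is enabled above.
		common.Assert(1+1 == 2, "arithmetic broke: %d", 1+1)
	})
}
```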
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go
new file mode 100644
index 0000000..0cc1ba7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go
@@ -0,0 +1,108 @@
+package freelist
+
+import (
+	"fmt"
+	"sort"
+
+	"go.etcd.io/bbolt/internal/common"
+)
+
+type array struct {
+	*shared
+
+	ids []common.Pgid // all free and available free page ids.
+}
+
+func (f *array) Init(ids common.Pgids) {
+	f.ids = ids
+	f.reindex()
+}
+
+func (f *array) Allocate(txid common.Txid, n int) common.Pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd common.Pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == common.Pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := common.Pgid(0); i < common.Pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+			f.allocs[initial] = txid
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+func (f *array) FreeCount() int {
+	return len(f.ids)
+}
+
+func (f *array) freePageIds() common.Pgids {
+	return f.ids
+}
+
+func (f *array) mergeSpans(ids common.Pgids) {
+	sort.Sort(ids)
+	common.Verify(func() {
+		idsIdx := make(map[common.Pgid]struct{})
+		for _, id := range f.ids {
+			// The existing f.ids shouldn't have any duplicated free IDs.
+			if _, ok := idsIdx[id]; ok {
+				panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids))
+			}
+			idsIdx[id] = struct{}{}
+		}
+
+		prev := common.Pgid(0)
+		for _, id := range ids {
+			// The ids shouldn't have any duplicated free IDs. Note pages 0 and 1
+			// are reserved for meta pages, so they can never be free page IDs.
+			if prev == id {
+				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
+			}
+			prev = id
+
+			// The ids shouldn't have any overlap with the existing f.ids.
+			if _, ok := idsIdx[id]; ok {
+				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids))
+			}
+		}
+	})
+	f.ids = common.Pgids(f.ids).Merge(ids)
+}
+
+func NewArrayFreelist() Interface {
+	a := &array{
+		shared: newShared(),
+		ids:    []common.Pgid{},
+	}
+	a.Interface = a
+	return a
+}
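
array.Allocate scans the sorted free list for a contiguous run of n pages, resetting the candidate start whenever the run breaks, and claims the run once it reaches length n. A standalone mirror of that scan, without the txid bookkeeping, cache updates, and page-id sanity checks:

```go
package main

import "fmt"

// allocate mirrors array.Allocate's scan over a sorted free list.
func allocate(ids []uint64, n int) (start uint64, rest []uint64) {
	var initial, previd uint64
	for i, id := range ids {
		if previd == 0 || id-previd != 1 {
			initial = id // run broken; restart the candidate here
		}
		if int(id-initial)+1 == n {
			// Claim [initial, id] and drop it from the free list.
			rest = append(append(rest, ids[:i-n+1]...), ids[i+1:]...)
			return initial, rest
		}
		previd = id
	}
	return 0, ids // no contiguous run of length n
}

func main() {
	free := []uint64{3, 4, 5, 8, 9}
	start, rest := allocate(free, 2)
	fmt.Println(start, rest) // 3 [5 8 9]
}
```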
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go
new file mode 100644
index 0000000..2b81950
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go
@@ -0,0 +1,82 @@
+package freelist
+
+import (
+	"go.etcd.io/bbolt/internal/common"
+)
+
+type ReadWriter interface {
+	// Read calls Init with the page ids stored in the given page.
+	Read(page *common.Page)
+
+	// Write writes the freelist into the given page.
+	Write(page *common.Page)
+
+	// EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write.
+	// This should never underestimate the size.
+	EstimatedWritePageSize() int
+}
+
+type Interface interface {
+	ReadWriter
+
+	// Init initializes this freelist with the given list of pages.
+	Init(ids common.Pgids)
+
+	// Allocate tries to allocate the given number of contiguous pages
+	// from the free list pages. It returns the starting page ID if
+	// available; otherwise, it returns 0.
+	Allocate(txid common.Txid, numPages int) common.Pgid
+
+	// Count returns the number of free and pending pages.
+	Count() int
+
+	// FreeCount returns the number of free pages.
+	FreeCount() int
+
+	// PendingCount returns the number of pending pages.
+	PendingCount() int
+
+	// AddReadonlyTXID adds a given read-only transaction id for pending page tracking.
+	AddReadonlyTXID(txid common.Txid)
+
+	// RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking.
+	RemoveReadonlyTXID(txid common.Txid)
+
+	// ReleasePendingPages releases any pages associated with closed read-only transactions.
+	ReleasePendingPages()
+
+	// Free releases a page and its overflow for a given transaction id.
+	// If the page is already free or is one of the meta pages, then a panic will occur.
+	Free(txId common.Txid, p *common.Page)
+
+	// Freed returns whether a given page is in the free list.
+	Freed(pgId common.Pgid) bool
+
+	// Rollback removes the pages from a given pending tx.
+	Rollback(txId common.Txid)
+
+	// Copyall copies a list of all free ids and all pending ids in one sorted list.
+	// Count returns the minimum length required for dst.
+	Copyall(dst []common.Pgid)
+
+	// Reload reads the freelist from a page and filters out pending items.
+	Reload(p *common.Page)
+
+	// NoSyncReload reads the freelist from Pgids and filters out pending items.
+	NoSyncReload(pgIds common.Pgids)
+
+	// freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available.
+	freePageIds() common.Pgids
+
+	// pendingPageIds returns all pending pages by transaction id.
+	pendingPageIds() map[common.Txid]*txPending
+
+	// release moves all page ids for a transaction id (or older) to the freelist.
+	release(txId common.Txid)
+
+	// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+	releaseRange(begin, end common.Txid)
+
+	// mergeSpans merges the given page ids into the freelist.
+	mergeSpans(ids common.Pgids)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
new file mode 100644
index 0000000..8d471f4
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
@@ -0,0 +1,292 @@
+package freelist
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"go.etcd.io/bbolt/internal/common"
+)
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[common.Pgid]struct{}
+
+type hashMap struct {
+	*shared
+
+	freePagesCount uint64                 // count of free pages (hashmap version)
+	freemaps       map[uint64]pidSet      // key is the size of a continuous page span; value is the set of starting pgids of spans with that size
+	forwardMap     map[common.Pgid]uint64 // key is start pgid, value is its span size
+	backwardMap    map[common.Pgid]uint64 // key is end pgid, value is its span size
+}
+
+func (f *hashMap) Init(pgids common.Pgids) {
+	// reset the counter when freelist init
+	f.freePagesCount = 0
+	f.freemaps = make(map[uint64]pidSet)
+	f.forwardMap = make(map[common.Pgid]uint64)
+	f.backwardMap = make(map[common.Pgid]uint64)
+
+	if len(pgids) == 0 {
+		return
+	}
+
+	if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+		panic("pgids not sorted")
+	}
+
+	size := uint64(1)
+	start := pgids[0]
+
+	for i := 1; i < len(pgids); i++ {
+		// continuous page
+		if pgids[i] == pgids[i-1]+1 {
+			size++
+		} else {
+			f.addSpan(start, size)
+
+			size = 1
+			start = pgids[i]
+		}
+	}
+
+	// init the tail
+	if size != 0 && start != 0 {
+		f.addSpan(start, size)
+	}
+
+	f.reindex()
+}
+
+func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
+	if n == 0 {
+		return 0
+	}
+
+	// if we have an exact size match, take the short path
+	if bm, ok := f.freemaps[uint64(n)]; ok {
+		for pid := range bm {
+			// remove the span
+			f.delSpan(pid, uint64(n))
+
+			f.allocs[pid] = txid
+
+			for i := common.Pgid(0); i < common.Pgid(n); i++ {
+				delete(f.cache, pid+i)
+			}
+			return pid
+		}
+	}
+
+	// lookup the map to find larger span
+	for size, bm := range f.freemaps {
+		if size < uint64(n) {
+			continue
+		}
+
+		for pid := range bm {
+			// remove the initial
+			f.delSpan(pid, size)
+
+			f.allocs[pid] = txid
+
+			remain := size - uint64(n)
+
+			// add remain span
+			f.addSpan(pid+common.Pgid(n), remain)
+
+			for i := common.Pgid(0); i < common.Pgid(n); i++ {
+				delete(f.cache, pid+i)
+			}
+			return pid
+		}
+	}
+
+	return 0
+}
+
+func (f *hashMap) FreeCount() int {
+	common.Verify(func() {
+		expectedFreePageCount := f.hashmapFreeCountSlow()
+		common.Assert(int(f.freePagesCount) == expectedFreePageCount,
+			"freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
+	})
+	return int(f.freePagesCount)
+}
+
+func (f *hashMap) freePageIds() common.Pgids {
+	count := f.FreeCount()
+	if count == 0 {
+		return common.Pgids{}
+	}
+
+	m := make([]common.Pgid, 0, count)
+
+	startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
+	for k := range f.forwardMap {
+		startPageIds = append(startPageIds, k)
+	}
+	sort.Sort(common.Pgids(startPageIds))
+
+	for _, start := range startPageIds {
+		if size, ok := f.forwardMap[start]; ok {
+			for i := 0; i < int(size); i++ {
+				m = append(m, start+common.Pgid(i))
+			}
+		}
+	}
+
+	return m
+}
+
+func (f *hashMap) hashmapFreeCountSlow() int {
+	count := 0
+	for _, size := range f.forwardMap {
+		count += int(size)
+	}
+	return count
+}
+
+func (f *hashMap) addSpan(start common.Pgid, size uint64) {
+	f.backwardMap[start-1+common.Pgid(size)] = size
+	f.forwardMap[start] = size
+	if _, ok := f.freemaps[size]; !ok {
+		f.freemaps[size] = make(map[common.Pgid]struct{})
+	}
+
+	f.freemaps[size][start] = struct{}{}
+	f.freePagesCount += size
+}
+
+func (f *hashMap) delSpan(start common.Pgid, size uint64) {
+	delete(f.forwardMap, start)
+	delete(f.backwardMap, start+common.Pgid(size-1))
+	delete(f.freemaps[size], start)
+	if len(f.freemaps[size]) == 0 {
+		delete(f.freemaps, size)
+	}
+	f.freePagesCount -= size
+}
+
+func (f *hashMap) mergeSpans(ids common.Pgids) {
+	common.Verify(func() {
+		ids1Freemap := f.idsFromFreemaps()
+		ids2Forward := f.idsFromForwardMap()
+		ids3Backward := f.idsFromBackwardMap()
+
+		if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
+			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
+		}
+		if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
+			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
+		}
+
+		sort.Sort(ids)
+		prev := common.Pgid(0)
+		for _, id := range ids {
+			// The ids shouldn't have any duplicated free IDs.
+			if prev == id {
+				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
+			}
+			prev = id
+
+			// The ids shouldn't have any overlap with the existing f.freemaps.
+			if _, ok := ids1Freemap[id]; ok {
+				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
+			}
+		}
+	})
+	for _, id := range ids {
+		// try to see if we can merge and update
+		f.mergeWithExistingSpan(id)
+	}
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward.
+func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
+	prev := pid - 1
+	next := pid + 1
+
+	preSize, mergeWithPrev := f.backwardMap[prev]
+	nextSize, mergeWithNext := f.forwardMap[next]
+	newStart := pid
+	newSize := uint64(1)
+
+	if mergeWithPrev {
+		// merge with previous span
+		start := prev + 1 - common.Pgid(preSize)
+		f.delSpan(start, preSize)
+
+		newStart -= common.Pgid(preSize)
+		newSize += preSize
+	}
+
+	if mergeWithNext {
+		// merge with next span
+		f.delSpan(next, nextSize)
+		newSize += nextSize
+	}
+
+	f.addSpan(newStart, newSize)
+}
+
+// idsFromFreemaps gets all free page IDs from f.freemaps.
+// Used by tests only.
+func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
+	ids := make(map[common.Pgid]struct{})
+	for size, idSet := range f.freemaps {
+		for start := range idSet {
+			for i := 0; i < int(size); i++ {
+				id := start + common.Pgid(i)
+				if _, ok := ids[id]; ok {
+					panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
+				}
+				ids[id] = struct{}{}
+			}
+		}
+	}
+	return ids
+}
+
+// idsFromForwardMap gets all free page IDs from f.forwardMap.
+// Used by tests only.
+func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
+	ids := make(map[common.Pgid]struct{})
+	for start, size := range f.forwardMap {
+		for i := 0; i < int(size); i++ {
+			id := start + common.Pgid(i)
+			if _, ok := ids[id]; ok {
+				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
+			}
+			ids[id] = struct{}{}
+		}
+	}
+	return ids
+}
+
+// idsFromBackwardMap gets all free page IDs from f.backwardMap.
+// Used by tests only.
+func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} {
+	ids := make(map[common.Pgid]struct{})
+	for end, size := range f.backwardMap {
+		for i := 0; i < int(size); i++ {
+			id := end - common.Pgid(i)
+			if _, ok := ids[id]; ok {
+				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap))
+			}
+			ids[id] = struct{}{}
+		}
+	}
+	return ids
+}
+
+func NewHashMapFreelist() Interface {
+	hm := &hashMap{
+		shared:      newShared(),
+		freemaps:    make(map[uint64]pidSet),
+		forwardMap:  make(map[common.Pgid]uint64),
+		backwardMap: make(map[common.Pgid]uint64),
+	}
+	hm.Interface = hm
+	return hm
+}
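
The hashmap freelist keeps three indexes of free spans (by size, by start, and by end) so that freeing one page can coalesce with its neighbors in constant time. A standalone mirror of mergeWithExistingSpan's coalescing on a stripped-down span table, omitting the size-keyed freemaps index:

```go
package main

import "fmt"

// spans mirrors the forwardMap/backwardMap pair from hashmap.go.
type spans struct {
	forward  map[uint64]uint64 // start pgid -> span size
	backward map[uint64]uint64 // end pgid -> span size
}

func (s *spans) add(start, size uint64) {
	s.forward[start] = size
	s.backward[start+size-1] = size
}

func (s *spans) del(start, size uint64) {
	delete(s.forward, start)
	delete(s.backward, start+size-1)
}

// free mirrors mergeWithExistingSpan: a freed page merges with a span
// ending at pid-1 and/or a span starting at pid+1.
func (s *spans) free(pid uint64) {
	newStart, newSize := pid, uint64(1)
	if size, ok := s.backward[pid-1]; ok { // span ends just before pid
		s.del(pid-size, size)
		newStart -= size
		newSize += size
	}
	if size, ok := s.forward[pid+1]; ok { // span starts just after pid
		s.del(pid+1, size)
		newSize += size
	}
	s.add(newStart, newSize)
}

func main() {
	s := &spans{forward: map[uint64]uint64{}, backward: map[uint64]uint64{}}
	s.add(3, 2)            // pages 3-4 free
	s.add(7, 1)            // page 7 free
	s.free(5)              // coalesces with 3-4 to form 3-5
	s.free(6)              // bridges 3-5 and 7 into one span 3-7
	fmt.Println(s.forward) // map[3:5]
}
```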
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go
new file mode 100644
index 0000000..f2d1130
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go
@@ -0,0 +1,310 @@
+package freelist
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"unsafe"
+
+	"go.etcd.io/bbolt/internal/common"
+)
+
+type txPending struct {
+	ids              []common.Pgid
+	alloctx          []common.Txid // txids allocating the ids
+	lastReleaseBegin common.Txid   // beginning txid of last matching releaseRange
+}
+
+type shared struct {
+	Interface
+
+	readonlyTXIDs []common.Txid               // all readonly transaction IDs.
+	allocs        map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid.
+	cache         map[common.Pgid]struct{}    // fast lookup of all free and pending page ids.
+	pending       map[common.Txid]*txPending  // mapping of soon-to-be free page ids by tx.
+}
+
+func newShared() *shared {
+	return &shared{
+		pending: make(map[common.Txid]*txPending),
+		allocs:  make(map[common.Pgid]common.Txid),
+		cache:   make(map[common.Pgid]struct{}),
+	}
+}
+
+func (t *shared) pendingPageIds() map[common.Txid]*txPending {
+	return t.pending
+}
+
+func (t *shared) PendingCount() int {
+	var count int
+	for _, txp := range t.pending {
+		count += len(txp.ids)
+	}
+	return count
+}
+
+func (t *shared) Count() int {
+	return t.FreeCount() + t.PendingCount()
+}
+
+func (t *shared) Freed(pgId common.Pgid) bool {
+	_, ok := t.cache[pgId]
+	return ok
+}
+
+func (t *shared) Free(txid common.Txid, p *common.Page) {
+	if p.Id() <= 1 {
+		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id()))
+	}
+
+	// Free page and all its overflow pages.
+	txp := t.pending[txid]
+	if txp == nil {
+		txp = &txPending{}
+		t.pending[txid] = txp
+	}
+	allocTxid, ok := t.allocs[p.Id()]
+	common.Verify(func() {
+		if allocTxid == txid {
+			panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid))
+		}
+	})
+	if ok {
+		delete(t.allocs, p.Id())
+	}
+
+	for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ {
+		// Verify that page is not already free.
+		if _, ok := t.cache[id]; ok {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
+		// Add to the freelist and cache.
+		txp.ids = append(txp.ids, id)
+		txp.alloctx = append(txp.alloctx, allocTxid)
+		t.cache[id] = struct{}{}
+	}
+}
+
+func (t *shared) Rollback(txid common.Txid) {
+	// Remove page ids from cache.
+	txp := t.pending[txid]
+	if txp == nil {
+		return
+	}
+	for i, pgid := range txp.ids {
+		delete(t.cache, pgid)
+		tx := txp.alloctx[i]
+		if tx == 0 {
+			continue
+		}
+		if tx != txid {
+			// Pending free aborted; restore page back to alloc list.
+			t.allocs[pgid] = tx
+		} else {
+			// A writing TXN should never free a page which was allocated by itself.
+			panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid))
+		}
+	}
+	// Remove pages from pending list and mark as free if allocated by txid.
+	delete(t.pending, txid)
+
+	// Remove pgids which are allocated by this txid
+	for pgid, tid := range t.allocs {
+		if tid == txid {
+			delete(t.allocs, pgid)
+		}
+	}
+}
+
+func (t *shared) AddReadonlyTXID(tid common.Txid) {
+	t.readonlyTXIDs = append(t.readonlyTXIDs, tid)
+}
+
+func (t *shared) RemoveReadonlyTXID(tid common.Txid) {
+	for i := range t.readonlyTXIDs {
+		if t.readonlyTXIDs[i] == tid {
+			last := len(t.readonlyTXIDs) - 1
+			t.readonlyTXIDs[i] = t.readonlyTXIDs[last]
+			t.readonlyTXIDs = t.readonlyTXIDs[:last]
+			break
+		}
+	}
+}
+
+type txIDx []common.Txid
+
+func (t txIDx) Len() int           { return len(t) }
+func (t txIDx) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
+func (t txIDx) Less(i, j int) bool { return t[i] < t[j] }
+
+func (t *shared) ReleasePendingPages() {
+	// Free all pending pages prior to the earliest open transaction.
+	sort.Sort(txIDx(t.readonlyTXIDs))
+	minid := common.Txid(math.MaxUint64)
+	if len(t.readonlyTXIDs) > 0 {
+		minid = t.readonlyTXIDs[0]
+	}
+	if minid > 0 {
+		t.release(minid - 1)
+	}
+	// Release unused txid extents.
+	for _, tid := range t.readonlyTXIDs {
+		t.releaseRange(minid, tid-1)
+		minid = tid + 1
+	}
+	t.releaseRange(minid, common.Txid(math.MaxUint64))
+	// Any page both allocated and freed in an extent is safe to release.
+}
+
+func (t *shared) release(txid common.Txid) {
+	m := make(common.Pgids, 0)
+	for tid, txp := range t.pending {
+		if tid <= txid {
+			// Move transaction's pending pages to the available freelist.
+			// Don't remove from the cache since the page is still free.
+			m = append(m, txp.ids...)
+			delete(t.pending, tid)
+		}
+	}
+	t.mergeSpans(m)
+}
+
+func (t *shared) releaseRange(begin, end common.Txid) {
+	if begin > end {
+		return
+	}
+	m := common.Pgids{}
+	for tid, txp := range t.pending {
+		if tid < begin || tid > end {
+			continue
+		}
+		// Don't recompute freed pages if ranges haven't updated.
+		if txp.lastReleaseBegin == begin {
+			continue
+		}
+		for i := 0; i < len(txp.ids); i++ {
+			if atx := txp.alloctx[i]; atx < begin || atx > end {
+				continue
+			}
+			m = append(m, txp.ids[i])
+			txp.ids[i] = txp.ids[len(txp.ids)-1]
+			txp.ids = txp.ids[:len(txp.ids)-1]
+			txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+			txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+			i--
+		}
+		txp.lastReleaseBegin = begin
+		if len(txp.ids) == 0 {
+			delete(t.pending, tid)
+		}
+	}
+	t.mergeSpans(m)
+}
+
+// Copyall copies a list of all free ids and all pending ids in one sorted list.
+// Count returns the minimum length required for dst.
+func (t *shared) Copyall(dst []common.Pgid) {
+	m := make(common.Pgids, 0, t.PendingCount())
+	for _, txp := range t.pendingPageIds() {
+		m = append(m, txp.ids...)
+	}
+	sort.Sort(m)
+	common.Mergepgids(dst, t.freePageIds(), m)
+}
+
+func (t *shared) Reload(p *common.Page) {
+	t.Read(p)
+	t.NoSyncReload(t.freePageIds())
+}
+
+func (t *shared) NoSyncReload(pgIds common.Pgids) {
+	// Build a cache of only pending pages.
+	pcache := make(map[common.Pgid]bool)
+	for _, txp := range t.pending {
+		for _, pendingID := range txp.ids {
+			pcache[pendingID] = true
+		}
+	}
+
+	// Check each page in the freelist and build a new available freelist
+	// with any pages not in the pending lists.
+	a := []common.Pgid{}
+	for _, id := range pgIds {
+		if !pcache[id] {
+			a = append(a, id)
+		}
+	}
+
+	t.Init(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (t *shared) reindex() {
+	free := t.freePageIds()
+	pending := t.pendingPageIds()
+	t.cache = make(map[common.Pgid]struct{}, len(free))
+	for _, id := range free {
+		t.cache[id] = struct{}{}
+	}
+	for _, txp := range pending {
+		for _, pendingID := range txp.ids {
+			t.cache[pendingID] = struct{}{}
+		}
+	}
+}
+
+func (t *shared) Read(p *common.Page) {
+	if !p.IsFreelistPage() {
+		panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ()))
+	}
+
+	ids := p.FreelistPageIds()
+
+	// Copy the list of page ids from the freelist.
+	if len(ids) == 0 {
+		t.Init([]common.Pgid{})
+	} else {
+		// copy the ids, so we don't modify the freelist page directly
+		idsCopy := make([]common.Pgid, len(ids))
+		copy(idsCopy, ids)
+		// Make sure they're sorted.
+		sort.Sort(common.Pgids(idsCopy))
+
+		t.Init(idsCopy)
+	}
+}
+
+func (t *shared) EstimatedWritePageSize() int {
+	n := t.Count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n)
+}
+
+func (t *shared) Write(p *common.Page) {
+	// Combine the old free pgids and pgids waiting on an open transaction.
+
+	// Update the header flag.
+	p.SetFlags(common.FreelistPageFlag)
+
+	// The page.count can only hold up to 64k elements so if we overflow that
+	// number then we handle it by putting the size in the first element.
+	l := t.Count()
+	if l == 0 {
+		p.SetCount(uint16(l))
+	} else if l < 0xFFFF {
+		p.SetCount(uint16(l))
+		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		ids := unsafe.Slice((*common.Pgid)(data), l)
+		t.Copyall(ids)
+	} else {
+		p.SetCount(0xFFFF)
+		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		ids := unsafe.Slice((*common.Pgid)(data), l+1)
+		ids[0] = common.Pgid(l)
+		t.Copyall(ids[1:])
+	}
+}
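
Write and FreelistPageCount share an overflow convention: page.count is a uint16, so once the freelist holds 0xFFFF or more ids the count field is pinned to 0xFFFF and the true length moves into the first element of the id array. A standalone sketch of that encoding and its inverse:

```go
package main

import "fmt"

// encode mirrors shared.Write's count handling: small lists store the
// length in count; large lists pin count to 0xFFFF and prepend the
// real length as the first element.
func encode(ids []uint64) (count uint16, payload []uint64) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	return 0xFFFF, append([]uint64{uint64(len(ids))}, ids...)
}

// decode mirrors Page.FreelistPageCount/FreelistPageIds.
func decode(count uint16, payload []uint64) []uint64 {
	if count == 0xFFFF {
		n := payload[0]
		return payload[1 : 1+n]
	}
	return payload[:count]
}

func main() {
	ids := []uint64{2, 3, 5}
	c, p := encode(ids)
	fmt.Println(c, decode(c, p)) // 3 [2 3 5]
}
```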