[VOL-5486] Fix deprecated versions

Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
deleted file mode 100644
index 4f2ee4d..0000000
--- a/vendor/github.com/google/btree/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
index 6062a4d..eab5dbf 100644
--- a/vendor/github.com/google/btree/README.md
+++ b/vendor/github.com/google/btree/README.md
@@ -1,7 +1,5 @@
 # BTree implementation for Go
 
-![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
-
 This package provides an in-memory B-Tree implementation for Go, useful as
 an ordered, mutable data structure.
 
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
index b83acdb..6f5184f 100644
--- a/vendor/github.com/google/btree/btree.go
+++ b/vendor/github.com/google/btree/btree.go
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !go1.18
+// +build !go1.18
+
 // Package btree implements in-memory B-Trees of arbitrary degree.
 //
 // btree implements an in-memory B-Tree for use as an ordered data structure.
@@ -476,7 +479,7 @@
 		child := n.mutableChild(i)
 		// merge with right child
 		mergeItem := n.items.removeAt(i)
-		mergeChild := n.children.removeAt(i + 1)
+		mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
 		child.items = append(child.items, mergeItem)
 		child.items = append(child.items, mergeChild.items...)
 		child.children = append(child.children, mergeChild.children...)
diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go
new file mode 100644
index 0000000..e44a0f4
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_generic.go
@@ -0,0 +1,1083 @@
+// Copyright 2014-2022 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific
+// instantiation of that generic for the Item interface, with a backwards-
+// compatible API.  Before go1.18, generics are not supported,
+// and BTree is just an implementation based around the Item interface.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Due to the overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type), resulting in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement for gollrb.LLRB
+// trees (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generics, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
+
+const (
+	DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+	mu       sync.Mutex
+	freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] {
+	return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
+}
+
+func (f *FreeListG[T]) newNode() (n *node[T]) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node[T])
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIteratorG[T any] func(item T) bool
+
+// Ordered represents the set of types for which the '<' operator works.
+type Ordered interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
+}
+
+// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
+func Less[T Ordered]() LessFunc[T] {
+	return func(a, b T) bool { return a < b }
+}
+
+// NewOrderedG creates a new B-Tree for ordered types.
+func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
+	return NewG[T](degree, Less[T]())
+}
+
+// NewG creates a new B-Tree with the given degree.
+//
+// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+//
+// The passed-in LessFunc determines how objects of type T are ordered.
+func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
+	return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
+}
+
+// NewWithFreeListG creates a new B-Tree that uses the given node free list.
+func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTreeG[T]{
+		degree: degree,
+		cow:    &copyOnWriteContext[T]{freelist: f, less: less},
+	}
+}
+
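+// The generic constructors above are typically combined as in the following
+// illustrative sketch (the values used here are hypothetical):
+//
+//	tr := NewOrderedG[int](2) // a 2-3-4 tree ordered by '<'
+//	tr.ReplaceOrInsert(42)
+//	if v, ok := tr.Get(42); ok {
+//		fmt.Println(v) // prints 42
+//	}
+//	tr.Ascend(func(v int) bool { return true }) // visit every item in order
+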
+// items stores items in a node.
+type items[T any] []T
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items[T]) insertAt(index int, item T) {
+	var zero T
+	*s = append(*s, zero)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items[T]) removeAt(index int) T {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	var zero T
+	(*s)[len(*s)-1] = zero
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items[T]) pop() (out T) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	var zero T
+	(*s)[index] = zero
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items[T]) truncate(index int) {
+	var toClear items[T]
+	*s, toClear = (*s)[:index], (*s)[index:]
+	var zero T
+	for i := 0; i < len(toClear); i++ {
+		toClear[i] = zero
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return less(item, s[i])
+	})
+	if i > 0 && !less(s[i-1], item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node[T any] struct {
+	items    items[T]
+	children items[*node[T]]
+	cow      *copyOnWriteContext[T]
+}
+
+func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items[T], len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(items[*node[T]], len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node[T]) mutableChild(i int) *node[T] {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node[T]) split(i int) (T, *node[T]) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node[T]) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item be
+// found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
+	i, found := n.items.find(item, n.cow.less)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out, true
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case n.cow.less(item, inTree):
+			// no change, we want first split node
+		case n.cow.less(inTree, item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out, true
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node[T]) get(key T) (_ T, _ bool) {
+	i, found := n.items.find(key, n.cow.less)
+	if found {
+		return n.items[i], true
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return
+}
+
+// min returns the first item in the subtree.
+func min[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[0], true
+}
+
+// max returns the last item in the subtree.
+func max[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[len(n.items)-1], true
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop(), true
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0), true
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item, n.cow.less)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i), true
+			}
+			return
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=removeMax to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		var zero T
+		n.items[i], _ = child.remove(zero, minItems, removeMax)
+		return out, true
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+type optionalItem[T any] struct {
+	item  T
+	valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+	return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+	return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
+// "greaterThan" or "lessThan" queries.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start.valid {
+			index, _ = n.items.find(start.item, n.cow.less)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop.valid && !n.cow.less(n.items[i], stop.item) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start.valid {
+			index, found = n.items.find(start.item, n.cow.less)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start.valid && !n.cow.less(n.items[i], start.item) {
+				if !includeStart || hit || n.cow.less(start.item, n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop.valid && !n.cow.less(stop.item, n.items[i]) {
+				return hit, false //	continue
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node[T]) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTreeG is a generic implementation of a B-Tree.
+//
+// BTreeG stores items of type T in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTreeG[T any] struct {
+	degree int
+	length int
+	root   *node[T]
+	cow    *copyOnWriteContext[T]
+}
+
+// LessFunc[T] determines how to order a type 'T'.  It should implement a strict
+// ordering, and should return true if within that ordering, 'a' < 'b'.
+type LessFunc[T any] func(a, b T) bool
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (IE: it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext[T any] struct {
+	freelist *FreeListG[T]
+	less     LessFunc[T]
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should have no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old b.cow)
+	//   the new b.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
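+// A sketch of the copy-on-write behaviour described above (illustrative only;
+// the values are hypothetical):
+//
+//	t := NewOrderedG[int](2)
+//	t.ReplaceOrInsert(1)
+//	t2 := t.Clone()       // t and t2 now share read-only nodes
+//	t2.ReplaceOrInsert(2) // the first write to t2 copies only the touched nodes
+//	// t still holds just 1, while t2 holds 1 and 2.
+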
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true.  Otherwise, (zeroValue, false) is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out, outb := t.root.insert(item, t.maxItems())
+	if !outb {
+		t.length++
+	}
+	return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMax() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMax)
+}
+
+func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) {
+	if t.root == nil || len(t.root.items) == 0 {
+		return
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out, outb := t.root.remove(item, t.minItems(), typ)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	if outb {
+		t.length--
+	}
+	return out, outb
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it.  It returns
+// (zeroValue, false) if unable to find that item.
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
+	if t.root == nil {
+		return
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Min() (_ T, _ bool) {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Max() (_ T, _ bool) {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTreeG[T]) Has(key T) bool {
+	_, ok := t.Get(key)
+	return ok
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTreeG[T]) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist.  It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up.  Returns true if parent reset call should continue.
+func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree BTreeG[Item]
+
+var itemLess LessFunc[Item] = func(a, b Item) bool {
+	return a.Less(b)
+}
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return (*BTree)(NewG[Item](degree, itemLess))
+}
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeList FreeListG[Item]
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should have no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).Delete(item)
+	return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMax()
+	return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMin()
+	return i
+}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	i, _ := (*BTreeG[Item])(t).Get(key)
+	return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	i, _ := (*BTreeG[Item])(t).Max()
+	return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	i, _ := (*BTreeG[Item])(t).Min()
+	return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+	return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+	(*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator))
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	(*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator))
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return (*BTreeG[Item])(t).Len()
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+	(*BTreeG[Item])(t).Clear(addNodesToFreelist)
+}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..0f5b8a4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that it is unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+//   - When the default behavior of equality does not suit the test's needs,
+//     custom equality functions can override the equality operation.
+//     For example, an equality function may report floats as equal so long as
+//     they are within some tolerance of each other.
+//
+//   - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+//     to determine equality. This allows package authors to determine
+//     the equality operation for the types that they define.
+//
+//   - If no custom equality functions are used and no Equal method is defined,
+//     equality is determined by recursively comparing the primitive kinds on
+//     both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+//     unexported fields are not compared by default; they result in panics
+//     unless suppressed by using an [Ignore] option
+//     (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+//     or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/function"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+//   - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+//     remain after applying all path filters, value filters, and type filters.
+//     If at least one [Ignore] exists in S, then the comparison is ignored.
+//     If the number of [Transformer] and [Comparer] options in S is non-zero,
+//     then Equal panics because it is ambiguous which option to use.
+//     If S contains a single [Transformer], then use that to transform
+//     the current values and recursively call Equal on the output values.
+//     If S contains a single [Comparer], then use that to compare the current values.
+//     Otherwise, evaluation proceeds to the next rule.
+//
+//   - If the values have an Equal method of the form "(T) Equal(T) bool" or
+//     "(T) Equal(I) bool" where T is assignable to I, then use the result of
+//     x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+//     evaluation proceeds to the next rule.
+//
+//   - Lastly, try to compare x and y based on their basic kinds.
+//     Simple kinds like booleans, integers, floats, complex numbers, strings,
+//     and channels are compared using the equivalent of the == operator in Go.
+//     Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields report equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements report equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries report equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+	s := newState(opts)
+	s.compareAny(rootStep(x, y))
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more
+// human-readable output. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+	s := newState(opts)
+
+	// Optimization: If there are no other reporters, we can optimize for the
+	// common case where the result is equal (and thus no reported difference).
+	// This avoids the expensive construction of a difference tree.
+	if len(s.reporters) == 0 {
+		s.compareAny(rootStep(x, y))
+		if s.result.Equal() {
+			return ""
+		}
+		s.result = diff.Result{} // Reset results
+	}
+
+	r := new(defaultReporter)
+	s.reporters = append(s.reporters, reporter{r})
+	s.compareAny(rootStep(x, y))
+	d := r.String()
+	if (d == "") != s.result.Equal() {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
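+// A sketch of typical usage of Equal and Diff (illustrative only; the point
+// type and its values are hypothetical):
+//
+//	type point struct{ X, Y int }
+//	got, want := point{1, 2}, point{1, 3}
+//	if !Equal(want, got) {
+//		fmt.Printf("mismatch (-want +got):\n%s", Diff(want, got))
+//	}
+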
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+	vx := reflect.ValueOf(x)
+	vy := reflect.ValueOf(y)
+
+	// If the inputs are different types, auto-wrap them in an empty interface
+	// so that they have the same parent type.
+	var t reflect.Type
+	if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+		t = anyType
+		if vx.IsValid() {
+			vvx := reflect.New(t).Elem()
+			vvx.Set(vx)
+			vx = vvx
+		}
+		if vy.IsValid() {
+			vvy := reflect.New(t).Elem()
+			vvy.Set(vy)
+			vy = vvy
+		}
+	} else {
+		t = vx.Type()
+	}
+
+	return &pathStep{t, vx, vy}
+}
+
+type state struct {
+	// These fields represent the "comparison state".
+	// Calling statelessCompare must not result in observable changes to these.
+	result    diff.Result // The current result of comparison
+	curPath   Path        // The current path in the value tree
+	curPtrs   pointerPath // The current set of visited pointers
+	reporters []reporter  // Optional reporters
+
+	// recChecker checks for infinite cycles applying the same set of
+	// transformers upon the output of itself.
+	recChecker recChecker
+
+	// dynChecker triggers pseudo-random checks for option correctness.
+	// It is safe for statelessCompare to mutate this value.
+	dynChecker dynChecker
+
+	// These fields, once set by processOption, will not change.
+	exporters []exporter // List of exporters for structs with unexported fields
+	opts      Options    // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+	// Always ensure a validator option exists to validate the inputs.
+	s := &state{opts: Options{validator{}}}
+	s.curPtrs.Init()
+	s.processOption(Options(opts))
+	return s
+}
+
+func (s *state) processOption(opt Option) {
+	switch opt := opt.(type) {
+	case nil:
+	case Options:
+		for _, o := range opt {
+			s.processOption(o)
+		}
+	case coreOption:
+		type filtered interface {
+			isFiltered() bool
+		}
+		if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+			panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+		}
+		s.opts = append(s.opts, opt)
+	case exporter:
+		s.exporters = append(s.exporters, opt)
+	case reporter:
+		s.reporters = append(s.reporters, opt)
+	default:
+		panic(fmt.Sprintf("unknown option %T", opt))
+	}
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+	// We do not save and restore curPath and curPtrs because all of the
+	// compareX methods should properly push and pop from them.
+	// It is an implementation bug if the contents of the paths differ from
+	// when calling this function to when returning from it.
+
+	oldResult, oldReporters := s.result, s.reporters
+	s.result = diff.Result{} // Reset result
+	s.reporters = nil        // Remove reporters to avoid spurious printouts
+	s.compareAny(step)
+	res := s.result
+	s.result, s.reporters = oldResult, oldReporters
+	return res
+}
+
+func (s *state) compareAny(step PathStep) {
+	// Update the path stack.
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for _, r := range s.reporters {
+		r.PushStep(step)
+		defer r.PopStep()
+	}
+	s.recChecker.Check(s.curPath)
+
+	// Cycle-detection for slice elements (see NOTE in compareSlice).
+	t := step.Type()
+	vx, vy := step.Values()
+	if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+		px, py := vx.Addr(), vy.Addr()
+		if eq, visited := s.curPtrs.Push(px, py); visited {
+			s.report(eq, reportByCycle)
+			return
+		}
+		defer s.curPtrs.Pop(px, py)
+	}
+
+	// Rule 1: Check whether an option applies on this node in the value tree.
+	if s.tryOptions(t, vx, vy) {
+		return
+	}
+
+	// Rule 2: Check whether the type has a valid Equal method.
+	if s.tryMethod(t, vx, vy) {
+		return
+	}
+
+	// Rule 3: Compare based on the underlying kind.
+	switch t.Kind() {
+	case reflect.Bool:
+		s.report(vx.Bool() == vy.Bool(), 0)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		s.report(vx.Int() == vy.Int(), 0)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		s.report(vx.Uint() == vy.Uint(), 0)
+	case reflect.Float32, reflect.Float64:
+		s.report(vx.Float() == vy.Float(), 0)
+	case reflect.Complex64, reflect.Complex128:
+		s.report(vx.Complex() == vy.Complex(), 0)
+	case reflect.String:
+		s.report(vx.String() == vy.String(), 0)
+	case reflect.Chan, reflect.UnsafePointer:
+		s.report(vx.Pointer() == vy.Pointer(), 0)
+	case reflect.Func:
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+	case reflect.Struct:
+		s.compareStruct(t, vx, vy)
+	case reflect.Slice, reflect.Array:
+		s.compareSlice(t, vx, vy)
+	case reflect.Map:
+		s.compareMap(t, vx, vy)
+	case reflect.Ptr:
+		s.comparePtr(t, vx, vy)
+	case reflect.Interface:
+		s.compareInterface(t, vx, vy)
+	default:
+		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+	}
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
+	// Evaluate all filters and apply the remaining options.
+	if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+		opt.apply(s, vx, vy)
+		return true
+	}
+	return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
+	// Check if this type even has an Equal method.
+	m, ok := t.MethodByName("Equal")
+	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+		return false
+	}
+
+	eq := s.callTTBFunc(m.Func, vx, vy)
+	s.report(eq, reportByMethod)
+	return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{v})[0]
+	}
+
+	// Run the function twice and ensure that we get the same results back.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, v)
+	got := <-c
+	want := f.Call([]reflect.Value{v})[0]
+	if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+		// To avoid false-positives with non-reflexive equality operations,
+		// we sanity check whether a value is equal to itself.
+		if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+			return want
+		}
+		panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	got := <-c
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if !got.IsValid() || got.Bool() != want {
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+	var addr bool
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	var mayForce, mayForceInit bool
+	step := StructField{&structField{}}
+	for i := 0; i < t.NumField(); i++ {
+		step.typ = t.Field(i).Type
+		step.vx = vx.Field(i)
+		step.vy = vy.Field(i)
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			if step.name == "_" {
+				continue
+			}
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For retrieveUnexportedField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				addr = vx.CanAddr() || vy.CanAddr()
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			if !mayForceInit {
+				for _, xf := range s.exporters {
+					mayForce = mayForce || xf(t)
+				}
+				mayForceInit = true
+			}
+			step.mayForce = mayForce
+			step.paddr = addr
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(step)
+	}
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+	isSlice := t.Kind() == reflect.Slice
+	if isSlice && (vx.IsNil() || vy.IsNil()) {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+	// since slices represent a list of pointers, rather than a single pointer.
+	// The pointer checking logic must be handled on a per-element basis
+	// in compareAny.
+	//
+	// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+	// pointer P, a length N, and a capacity C. Supposing each slice element has
+	// a memory size of M, then the slice is equivalent to the list of pointers:
+	//	[P+i*M for i in range(N)]
+	//
+	// For example, v[:0] and v[:1] are slices with the same starting pointer,
+	// but they are clearly different values. Using the slice pointer alone
+	// violates the assumption that equal pointers implies equal values.
+
+	step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+	withIndexes := func(ix, iy int) SliceIndex {
+		if ix >= 0 {
+			step.vx, step.xkey = vx.Index(ix), ix
+		} else {
+			step.vx, step.xkey = reflect.Value{}, -1
+		}
+		if iy >= 0 {
+			step.vy, step.ykey = vy.Index(iy), iy
+		} else {
+			step.vy, step.ykey = reflect.Value{}, -1
+		}
+		return step
+	}
+
+	// Ignore options are able to ignore missing elements in a slice.
+	// However, detecting these reliably requires an optimal differencing
+	// algorithm, which diff.Difference is not.
+	//
+	// Instead, we first iterate through both slices to detect which elements
+	// would be ignored if standing alone. The indexes of non-discarded elements
+	// are stored in separate slices, on which diffing is then performed.
+	var indexesX, indexesY []int
+	var ignoredX, ignoredY []bool
+	for ix := 0; ix < vx.Len(); ix++ {
+		ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+		if !ignored {
+			indexesX = append(indexesX, ix)
+		}
+		ignoredX = append(ignoredX, ignored)
+	}
+	for iy := 0; iy < vy.Len(); iy++ {
+		ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+		if !ignored {
+			indexesY = append(indexesY, iy)
+		}
+		ignoredY = append(ignoredY, ignored)
+	}
+
+	// Compute an edit-script for slices vx and vy (excluding ignored elements).
+	edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+		return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+	})
+
+	// Replay the ignore-scripts and the edit-script.
+	var ix, iy int
+	for ix < vx.Len() || iy < vy.Len() {
+		var e diff.EditType
+		switch {
+		case ix < len(ignoredX) && ignoredX[ix]:
+			e = diff.UniqueX
+		case iy < len(ignoredY) && ignoredY[iy]:
+			e = diff.UniqueY
+		default:
+			e, edits = edits[0], edits[1:]
+		}
+		switch e {
+		case diff.UniqueX:
+			s.compareAny(withIndexes(ix, -1))
+			ix++
+		case diff.UniqueY:
+			s.compareAny(withIndexes(-1, iy))
+			iy++
+		default:
+			s.compareAny(withIndexes(ix, iy))
+			ix++
+			iy++
+		}
+	}
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// Cycle-detection for maps.
+	if eq, visited := s.curPtrs.Push(vx, vy); visited {
+		s.report(eq, reportByCycle)
+		return
+	}
+	defer s.curPtrs.Pop(vx, vy)
+
+	// We combine and sort the two map keys so that we can perform the
+	// comparisons in a deterministic order.
+	step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.vx = vx.MapIndex(k)
+		step.vy = vy.MapIndex(k)
+		step.key = k
+		if !step.vx.IsValid() && !step.vy.IsValid() {
+			// It is possible for both vx and vy to be invalid if the
+			// key contained a NaN value in it.
+			//
+			// Even with the ability to retrieve NaN keys in Go 1.12,
+			// there still isn't a sensible way to compare the values since
+			// a NaN key may map to multiple unordered values.
+			// The most reasonable way to compare NaNs would be to compare the
+			// set of values. However, this is impossible to do efficiently
+			// since set equality is provably an O(n^2) operation given only
+			// an Equal function. If we had a Less function or Hash function,
+			// this could be done in O(n*log(n)) or O(n), respectively.
+			//
+			// Rather than adding complex logic to deal with NaNs, make it
+			// the user's responsibility to compare such obscure maps.
+			const help = "consider providing a Comparer to compare the map"
+			panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+		}
+		s.compareAny(step)
+	}
+}
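// Illustrative sketch, not part of the upstream file: supplying the Comparer that
// the panic above suggests for maps whose keys contain NaN. The helper
// equalNaNKeyMaps and its notion of equality (same length and same multiset of
// values) are deliberately simplified assumptions made only for this example.
package main

import (
	"fmt"
	"math"
	"reflect"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func equalNaNKeyMaps(x, y map[float64]int) bool {
	if len(x) != len(y) {
		return false
	}
	// NaN keys cannot be looked up directly, so compare the sorted values instead.
	var xs, ys []int
	for _, v := range x {
		xs = append(xs, v)
	}
	for _, v := range y {
		ys = append(ys, v)
	}
	sort.Ints(xs)
	sort.Ints(ys)
	return reflect.DeepEqual(xs, ys)
}

func main() {
	x := map[float64]int{math.NaN(): 1}
	y := map[float64]int{math.NaN(): 1}
	// Without the Comparer, cmp.Equal panics with "has map key with NaNs".
	fmt.Println(cmp.Equal(x, y, cmp.Comparer(equalNaNKeyMaps))) // true
}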
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// Cycle-detection for pointers.
+	if eq, visited := s.curPtrs.Push(vx, vy); visited {
+		s.report(eq, reportByCycle)
+		return
+	}
+	defer s.curPtrs.Pop(vx, vy)
+
+	vx, vy = vx.Elem(), vy.Elem()
+	s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+	vx, vy = vx.Elem(), vy.Elem()
+	if vx.Type() != vy.Type() {
+		s.report(false, 0)
+		return
+	}
+	s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+	if rf&reportByIgnore == 0 {
+		if eq {
+			s.result.NumSame++
+			rf |= reportEqual
+		} else {
+			s.result.NumDiff++
+			rf |= reportUnequal
+		}
+	}
+	for _, r := range s.reporters {
+		r.Report(Result{flags: rf})
+	}
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+	const minLen = 1 << 16
+	if rc.next == 0 {
+		rc.next = minLen
+	}
+	if len(p) < rc.next {
+		return
+	}
+	rc.next <<= 1
+
+	// Check whether the same transformer has appeared at least twice.
+	var ss []string
+	m := map[Option]int{}
+	for _, ps := range p {
+		if t, ok := ps.(Transform); ok {
+			t := t.Option()
+			if m[t] == 1 { // Transformer was used exactly once before
+				tf := t.(*transformer).fnc.Type()
+				ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+			}
+			m[t]++
+		}
+	}
+	if len(ss) > 0 {
+		const warning = "recursive set of Transformers detected"
+		const help = "consider using cmpopts.AcyclicTransformer"
+		set := strings.Join(ss, "\n\t")
+		panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+	}
+}
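// Illustrative sketch, not part of the upstream file: the cmpopts.AcyclicTransformer
// helper recommended by the panic message above. Splitting a string into lines is
// the kind of transform whose output contains values of its input type, which a
// plain Transformer could keep re-applying until recChecker panics.
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	opt := cmpopts.AcyclicTransformer("SplitLines", func(s string) []string {
		return strings.Split(s, "\n")
	})
	fmt.Println(cmp.Diff("a\nb\nc", "a\nB\nc", opt))
}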
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
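// Illustrative sketch, not part of the upstream file: a standalone copy of the
// unexported dynChecker logic above, showing that checks fire at triangular-number
// call indexes. The checker type exists only for this example.
package main

import "fmt"

type checker struct{ curr, next int }

func (c *checker) Next() bool {
	ok := c.curr == c.next
	if ok {
		c.curr = 0
		c.next++
	}
	c.curr++
	return ok
}

func main() {
	var c checker
	var checked []int
	for i := 0; i < 60; i++ {
		if c.Next() {
			checked = append(checked, i)
		}
	}
	fmt.Println(checked) // [0 1 3 6 10 15 21 28 36 45 55]
}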
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 0000000..29f82fe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+	ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+	if !addr {
+		// A field is addressable if and only if the struct is addressable.
+		// If the original parent value was not addressable, shallow copy the
+		// value to make it non-addressable to avoid leaking an implementation
+		// detail of how forcibly exporting a field works.
+		if ve.Kind() == reflect.Interface && ve.IsNil() {
+			return reflect.Zero(f.Type)
+		}
+		return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+	}
+	return ve
+}
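// Illustrative sketch, not part of the upstream file: the user-facing options that
// are built on top of retrieveUnexportedField. The point type is hypothetical.
package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

type point struct{ x, y int } // hypothetical type with unexported fields

func main() {
	a, b := point{1, 2}, point{1, 2}
	// Without an exporter, cmp.Equal panics on the unexported fields.
	fmt.Println(cmp.Equal(a, b, cmp.AllowUnexported(point{}))) // true
	// Equivalent, via the more general Exporter option:
	allow := cmp.Exporter(func(t reflect.Type) bool { return t == reflect.TypeOf(point{}) })
	fmt.Println(cmp.Equal(a, b, allow)) // true
}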
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..36062a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+	return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..a3b97a1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+//	go test -tags=cmp_debug -v
+//
+// Example output:
+//	=== RUN   TestDifference/#34
+//	┌───────────────────────────────┐
+//	│ \ · · · · · · · · · · · · · · │
+//	│ · # · · · · · · · · · · · · · │
+//	│ · \ · · · · · · · · · · · · · │
+//	│ · · \ · · · · · · · · · · · · │
+//	│ · · · X # · · · · · · · · · · │
+//	│ · · · # \ · · · · · · · · · · │
+//	│ · · · · · # # · · · · · · · · │
+//	│ · · · · · # \ · · · · · · · · │
+//	│ · · · · · · · \ · · · · · · · │
+//	│ · · · · · · · · \ · · · · · · │
+//	│ · · · · · · · · · \ · · · · · │
+//	│ · · · · · · · · · · \ · · # · │
+//	│ · · · · · · · · · · · \ # # · │
+//	│ · · · · · · · · · · · # # # · │
+//	│ · · · · · · · · · · # # # # · │
+//	│ · · · · · · · · · # # # # # · │
+//	│ · · · · · · · · · · · · · · \ │
+//	└───────────────────────────────┘
+//	[.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+	updateDelay  = 100 * time.Millisecond
+	finishDelay  = 500 * time.Millisecond
+	ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+	sync.Mutex
+	p1, p2           EditScript
+	fwdPath, revPath *EditScript
+	grid             []byte
+	lines            int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+	dbg.Lock()
+	dbg.fwdPath, dbg.revPath = p1, p2
+	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+	row := "│ " + strings.Repeat("· ", nx) + "│\n"
+	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+	dbg.lines = strings.Count(dbg.String(), "\n")
+	fmt.Print(dbg)
+
+	// Wrap the EqualFunc so that we can intercept each result.
+	return func(ix, iy int) (r Result) {
+		cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+		for i := range cell {
+			cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+		}
+		switch r = f(ix, iy); {
+		case r.Equal():
+			cell[0] = '\\'
+		case r.Similar():
+			cell[0] = 'X'
+		default:
+			cell[0] = '#'
+		}
+		return
+	}
+}
+
+func (dbg *debugger) Update() {
+	dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+	dbg.print(finishDelay)
+	dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+	dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+	for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+		dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+	}
+	return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+	if ansiTerminal {
+		fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+	}
+	fmt.Print(dbg)
+	time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..a248e54
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+	// Identity indicates that a symbol pair is identical in both list X and Y.
+	Identity EditType = iota
+	// UniqueX indicates that a symbol only exists in X and not Y.
+	UniqueX
+	// UniqueY indicates that a symbol only exists in Y and not X.
+	UniqueY
+	// Modified indicates that a symbol pair is a modification of each other.
+	Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+	b := make([]byte, len(es))
+	for i, e := range es {
+		switch e {
+		case Identity:
+			b[i] = '.'
+		case UniqueX:
+			b[i] = 'X'
+		case UniqueY:
+			b[i] = 'Y'
+		case Modified:
+			b[i] = 'M'
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+	for _, e := range es {
+		switch e {
+		case Identity:
+			s.NI++
+		case UniqueX:
+			s.NX++
+		case UniqueY:
+			s.NY++
+		case Modified:
+			s.NM++
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+	if b {
+		return Result{NumSame: 1} // Equal, Similar
+	} else {
+		return Result{NumDiff: 2} // Not Equal, not Similar
+	}
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff used to determine similarity may change.
+func (r Result) Similar() bool {
+	// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+	return r.NumSame+1 >= r.NumDiff
+}
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+//   - eq == (es.Dist()==0)
+//   - nx == es.LenX()
+//   - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but in at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//   - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//   - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//   - fwdFrontier.X < revFrontier.X
+	//   - fwdFrontier.Y < revFrontier.Y
+	//
+	// Unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//   - Searching for differences switches back-and-forth between
+	//     a search that starts at the beginning (the top-left corner), and
+	//     a search that starts at the end (the bottom-right corner).
+	//     The goal of the search is to connect with the search
+	//     from the opposite corner.
+	//   - As we search, we build a path in a greedy manner,
+	//     where the first match seen is added to the path (this is sub-optimal,
+	//     but provides a decent result in practice). When matches are found,
+	//     we try the next pair of symbols in the lists and follow all matches
+	//     as far as possible.
+	//   - When searching for matches, we search along a diagonal going
+	//     through the "frontier" point. If no matches are found,
+	//     we advance the frontier towards the opposite corner.
+	//   - This algorithm terminates when either the X coordinates or the
+	//     Y coordinates of the forward and reverse frontier points ever intersect.
+
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or end of one of them.
+	//
+	// Non-deterministically start with either the forward or reverse direction
+	// to introduce some deliberate instability so that we have the flexibility
+	// to change this algorithm in the future.
+	if flags.Deterministic || randBool {
+		goto forwardSearch
+	} else {
+		goto reverseSearch
+	}
+
+forwardSearch:
+	{
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			goto finishSearch
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+		goto reverseSearch
+	}
+
+reverseSearch:
+	{
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			goto finishSearch
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point.
+		if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+			revFrontier.X--
+		} else {
+			revFrontier.Y--
+		}
+		goto forwardSearch
+	}
+
+finishSearch:
+	// Join the forward and reverse paths and then append the reverse path.
+	fwdPath.connect(revPath.point, f)
+	for i := len(revPath.es) - 1; i >= 0; i-- {
+		t := revPath.es[i]
+		revPath.es = revPath.es[:i]
+		fwdPath.append(t)
+	}
+	debug.Finish()
+	return fwdPath.es
+}
+
+type path struct {
+	dir   int // +1 if forward, -1 if reverse
+	point     // Leading point of the EditScript path
+	es    EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+	if p.dir > 0 {
+		// Connect in forward direction.
+		for dst.X > p.X && dst.Y > p.Y {
+			switch r := f(p.X, p.Y); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case dst.X-p.X >= dst.Y-p.Y:
+				p.append(UniqueX)
+			default:
+				p.append(UniqueY)
+			}
+		}
+		for dst.X > p.X {
+			p.append(UniqueX)
+		}
+		for dst.Y > p.Y {
+			p.append(UniqueY)
+		}
+	} else {
+		// Connect in reverse direction.
+		for p.X > dst.X && p.Y > dst.Y {
+			switch r := f(p.X-1, p.Y-1); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case p.Y-dst.Y >= p.X-dst.X:
+				p.append(UniqueY)
+			default:
+				p.append(UniqueX)
+			}
+		}
+		for p.X > dst.X {
+			p.append(UniqueX)
+		}
+		for p.Y > dst.Y {
+			p.append(UniqueY)
+		}
+	}
+}
+
+func (p *path) append(t EditType) {
+	p.es = append(p.es, t)
+	switch t {
+	case Identity, Modified:
+		p.add(p.dir, p.dir)
+	case UniqueX:
+		p.add(p.dir, 0)
+	case UniqueY:
+		p.add(0, p.dir)
+	}
+	debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+	if x&1 != 0 {
+		x = ^x
+	}
+	return x >> 1
+}
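// Illustrative sketch, not part of the upstream file: a standalone copy of the
// zigzag helper above, printing the order in which the searches probe offsets
// around the frontier point.
package main

import "fmt"

func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x
	}
	return x >> 1
}

func main() {
	for i := 0; i < 7; i++ {
		fmt.Print(zigzag(i), " ") // 0 -1 1 -2 2 -3 3
	}
	fmt.Println()
}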
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 0000000..d8e459c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..def01a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+type funcType int
+
+const (
+	_ funcType = iota
+
+	tbFunc  // func(T) bool
+	ttbFunc // func(T, T) bool
+	ttiFunc // func(T, T) int
+	trbFunc // func(T, R) bool
+	tibFunc // func(T, I) bool
+	trFunc  // func(T) R
+
+	Equal             = ttbFunc // func(T, T) bool
+	EqualAssignable   = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+	Transformer       = trFunc  // func(T) R
+	ValueFilter       = ttbFunc // func(T, T) bool
+	Less              = ttbFunc // func(T, T) bool
+	Compare           = ttiFunc // func(T, T) int
+	ValuePredicate    = tbFunc  // func(T) bool
+	KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+		return false
+	}
+	ni, no := t.NumIn(), t.NumOut()
+	switch ft {
+	case tbFunc: // func(T) bool
+		if ni == 1 && no == 1 && t.Out(0) == boolType {
+			return true
+		}
+	case ttbFunc: // func(T, T) bool
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+			return true
+		}
+	case ttiFunc: // func(T, T) int
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+			return true
+		}
+	case trbFunc: // func(T, R) bool
+		if ni == 2 && no == 1 && t.Out(0) == boolType {
+			return true
+		}
+	case tibFunc: // func(T, I) bool
+		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+			return true
+		}
+	case trFunc: // func(T) R
+		if ni == 1 && no == 1 {
+			return true
+		}
+	}
+	return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+	fnc := runtime.FuncForPC(v.Pointer())
+	if fnc == nil {
+		return "<unknown>"
+	}
+	fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+	// Method closures have a "-fm" suffix.
+	fullName = strings.TrimSuffix(fullName, "-fm")
+
+	var name string
+	for len(fullName) > 0 {
+		inParen := strings.HasSuffix(fullName, ")")
+		fullName = strings.TrimSuffix(fullName, ")")
+
+		s := lastIdentRx.FindString(fullName)
+		if s == "" {
+			break
+		}
+		name = s + "." + name
+		fullName = strings.TrimSuffix(fullName, s)
+
+		if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+			fullName = fullName[:i]
+		}
+		fullName = strings.TrimSuffix(fullName, ".")
+	}
+	return strings.TrimSuffix(name, ".")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 0000000..7b498bb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+	"reflect"
+	"strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+	return string(appendTypeName(nil, t, qualified, false))
+}
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+	// BUG: Go reflection provides no way to disambiguate two named types
+	// of the same name and within the same package,
+	// but declared within the namespace of different functions.
+
+	// Use the "any" alias instead of "interface{}" for better readability.
+	if t == anyType {
+		return append(b, "any"...)
+	}
+
+	// Named type.
+	if t.Name() != "" {
+		if qualified && t.PkgPath() != "" {
+			b = append(b, '"')
+			b = append(b, t.PkgPath()...)
+			b = append(b, '"')
+			b = append(b, '.')
+			b = append(b, t.Name()...)
+		} else {
+			b = append(b, t.String()...)
+		}
+		return b
+	}
+
+	// Unnamed type.
+	switch k := t.Kind(); k {
+	case reflect.Bool, reflect.String, reflect.UnsafePointer,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		b = append(b, k.String()...)
+	case reflect.Chan:
+		if t.ChanDir() == reflect.RecvDir {
+			b = append(b, "<-"...)
+		}
+		b = append(b, "chan"...)
+		if t.ChanDir() == reflect.SendDir {
+			b = append(b, "<-"...)
+		}
+		b = append(b, ' ')
+		b = appendTypeName(b, t.Elem(), qualified, false)
+	case reflect.Func:
+		if !elideFunc {
+			b = append(b, "func"...)
+		}
+		b = append(b, '(')
+		for i := 0; i < t.NumIn(); i++ {
+			if i > 0 {
+				b = append(b, ", "...)
+			}
+			if i == t.NumIn()-1 && t.IsVariadic() {
+				b = append(b, "..."...)
+				b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+			} else {
+				b = appendTypeName(b, t.In(i), qualified, false)
+			}
+		}
+		b = append(b, ')')
+		switch t.NumOut() {
+		case 0:
+			// Do nothing
+		case 1:
+			b = append(b, ' ')
+			b = appendTypeName(b, t.Out(0), qualified, false)
+		default:
+			b = append(b, " ("...)
+			for i := 0; i < t.NumOut(); i++ {
+				if i > 0 {
+					b = append(b, ", "...)
+				}
+				b = appendTypeName(b, t.Out(i), qualified, false)
+			}
+			b = append(b, ')')
+		}
+	case reflect.Struct:
+		b = append(b, "struct{ "...)
+		for i := 0; i < t.NumField(); i++ {
+			if i > 0 {
+				b = append(b, "; "...)
+			}
+			sf := t.Field(i)
+			if !sf.Anonymous {
+				if qualified && sf.PkgPath != "" {
+					b = append(b, '"')
+					b = append(b, sf.PkgPath...)
+					b = append(b, '"')
+					b = append(b, '.')
+				}
+				b = append(b, sf.Name...)
+				b = append(b, ' ')
+			}
+			b = appendTypeName(b, sf.Type, qualified, false)
+			if sf.Tag != "" {
+				b = append(b, ' ')
+				b = strconv.AppendQuote(b, string(sf.Tag))
+			}
+		}
+		if b[len(b)-1] == ' ' {
+			b = b[:len(b)-1]
+		} else {
+			b = append(b, ' ')
+		}
+		b = append(b, '}')
+	case reflect.Slice, reflect.Array:
+		b = append(b, '[')
+		if k == reflect.Array {
+			b = strconv.AppendUint(b, uint64(t.Len()), 10)
+		}
+		b = append(b, ']')
+		b = appendTypeName(b, t.Elem(), qualified, false)
+	case reflect.Map:
+		b = append(b, "map["...)
+		b = appendTypeName(b, t.Key(), qualified, false)
+		b = append(b, ']')
+		b = appendTypeName(b, t.Elem(), qualified, false)
+	case reflect.Ptr:
+		b = append(b, '*')
+		b = appendTypeName(b, t.Elem(), qualified, false)
+	case reflect.Interface:
+		b = append(b, "interface{ "...)
+		for i := 0; i < t.NumMethod(); i++ {
+			if i > 0 {
+				b = append(b, "; "...)
+			}
+			m := t.Method(i)
+			if qualified && m.PkgPath != "" {
+				b = append(b, '"')
+				b = append(b, m.PkgPath...)
+				b = append(b, '"')
+				b = append(b, '.')
+			}
+			b = append(b, m.Name...)
+			b = appendTypeName(b, m.Type, qualified, true)
+		}
+		if b[len(b)-1] == ' ' {
+			b = b[:len(b)-1]
+		} else {
+			b = append(b, ' ')
+		}
+		b = append(b, '}')
+	default:
+		panic("invalid kind: " + k.String())
+	}
+	return b
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 0000000..e5dfff6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+	p unsafe.Pointer
+	t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+	// The proper representation of a pointer is unsafe.Pointer,
+	// which is necessary if the GC ever uses a moving collector.
+	return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+	return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+	return uintptr(p.p)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..98533b0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+	if len(vs) == 0 {
+		return vs
+	}
+
+	// Sort the map keys.
+	sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+	// Deduplicate keys (fails for NaNs).
+	vs2 := vs[:1]
+	for _, v := range vs[1:] {
+		if isLess(vs2[len(vs2)-1], v) {
+			vs2 = append(vs2, v)
+		}
+	}
+	return vs2
+}
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+	switch x.Type().Kind() {
+	case reflect.Bool:
+		return !x.Bool() && y.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return x.Int() < y.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return x.Uint() < y.Uint()
+	case reflect.Float32, reflect.Float64:
+		// NOTE: This does not sort -0 as less than +0
+		// since Go maps treat -0 and +0 as equal keys.
+		fx, fy := x.Float(), y.Float()
+		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+	case reflect.Complex64, reflect.Complex128:
+		cx, cy := x.Complex(), y.Complex()
+		rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+		if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+			return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+		}
+		return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+	case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+		return x.Pointer() < y.Pointer()
+	case reflect.String:
+		return x.String() < y.String()
+	case reflect.Array:
+		for i := 0; i < x.Len(); i++ {
+			if isLess(x.Index(i), y.Index(i)) {
+				return true
+			}
+			if isLess(y.Index(i), x.Index(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Struct:
+		for i := 0; i < x.NumField(); i++ {
+			if isLess(x.Field(i), y.Field(i)) {
+				return true
+			}
+			if isLess(y.Field(i), x.Field(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Interface:
+		vx, vy := x.Elem(), y.Elem()
+		if !vx.IsValid() || !vy.IsValid() {
+			return !vx.IsValid() && vy.IsValid()
+		}
+		tx, ty := vx.Type(), vy.Type()
+		if tx == ty {
+			return isLess(x.Elem(), y.Elem())
+		}
+		if tx.Kind() != ty.Kind() {
+			return vx.Kind() < vy.Kind()
+		}
+		if tx.String() != ty.String() {
+			return tx.String() < ty.String()
+		}
+		if tx.PkgPath() != ty.PkgPath() {
+			return tx.PkgPath() < ty.PkgPath()
+		}
+		// This can happen in rare situations, so we fall back to just comparing
+		// the unique pointer for a reflect.Type. This guarantees deterministic
+		// ordering within a program, but it is obviously not stable.
+		return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+	default:
+		// Must be Func, Map, or Slice; which are not comparable.
+		panic(fmt.Sprintf("%T is not comparable", x.Type()))
+	}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..ba3fce8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,562 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+	// filter applies all filters and returns the option that remains.
+	// Each option may only read s.curPath and call s.callTTBFunc.
+	//
+	// An Options is returned only if multiple comparers or transformers
+	// can apply simultaneously and will only contain values of those types
+	// or sub-Options containing values of those types.
+	filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+//	Fundamental: ignore | validator | *comparer | *transformer
+//	Grouping:    Options
+type applicableOption interface {
+	Option
+
+	// apply executes the option, which may mutate s or panic.
+	apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+//	Fundamental: ignore | validator | *comparer | *transformer
+//	Filters:     *pathFilter | *valuesFilter
+type coreOption interface {
+	Option
+	isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+	for _, opt := range opts {
+		switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+		case ignore:
+			return ignore{} // Only ignore can short-circuit evaluation
+		case validator:
+			out = validator{} // Takes precedence over comparer or transformer
+		case *comparer, *transformer, Options:
+			switch out.(type) {
+			case nil:
+				out = opt
+			case validator:
+				// Keep validator
+			case *comparer, *transformer, Options:
+				out = Options{out, opt} // Conflicting comparers or transformers
+			}
+		}
+	}
+	return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+	const warning = "ambiguous set of applicable options"
+	const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+	var ss []string
+	for _, opt := range flattenOptions(nil, opts) {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	set := strings.Join(ss, "\n\t")
+	panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+	var ss []string
+	for _, opt := range opts {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
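// Illustrative sketch, not part of the upstream file: grouping several options into
// a single Options value, which Equal expands into a flat list as described above.
// The cmpopts helpers used here are part of the go-cmp module.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	opts := cmp.Options{
		cmpopts.EquateEmpty(), // treat nil and empty slices/maps as equal
		cmpopts.EquateNaNs(),  // treat NaN values as equal to each other
	}
	x := map[string][]float64{"a": nil}
	y := map[string][]float64{"a": {}}
	fmt.Println(cmp.Equal(x, y, opts)) // true
}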
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+	if f == nil {
+		panic("invalid path filter function")
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		return &pathFilter{fnc: f, opt: opt}
+	}
+	return nil
+}
+
+type pathFilter struct {
+	core
+	fnc func(Path) bool
+	opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+	if f.fnc(s.curPath) {
+		return f.opt.filter(s, t, vx, vy)
+	}
+	return nil
+}
+
+func (f pathFilter) String() string {
+	return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
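// Illustrative sketch, not part of the upstream file: restricting an Ignore to one
// part of the value tree with FilterPath. The Config type and its Metadata field
// are hypothetical.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Config struct {
	Name     string
	Metadata map[string]string
}

func main() {
	ignoreMetadata := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Metadata" // matches the Metadata struct field step
	}, cmp.Ignore())

	x := Config{Name: "a", Metadata: map[string]string{"k": "1"}}
	y := Config{Name: "a", Metadata: map[string]string{"k": "2"}}
	fmt.Println(cmp.Equal(x, y, ignoreMetadata)) // true; Metadata is ignored
}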
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+		panic(fmt.Sprintf("invalid values filter function: %T", f))
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		vf := &valuesFilter{fnc: v, opt: opt}
+		if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+			vf.typ = ti
+		}
+		return vf
+	}
+	return nil
+}
+
+type valuesFilter struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+	opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+	if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+		return nil
+	}
+	if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+		return f.opt.filter(s, t, vx, vy)
+	}
+	return nil
+}
+
+func (f valuesFilter) String() string {
+	return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
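// Illustrative sketch, not part of the upstream file: using FilterValues to apply a
// trivially-true Comparer only to pairs of values that are both NaN, which is
// essentially how cmpopts.EquateNaNs is constructed.
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	equateNaNs := cmp.FilterValues(func(x, y float64) bool {
		return math.IsNaN(x) && math.IsNaN(y)
	}, cmp.Comparer(func(_, _ float64) bool { return true }))

	x := []float64{1, math.NaN(), 3}
	y := []float64{1, math.NaN(), 3}
	fmt.Println(cmp.Equal(x, y, equateNaNs)) // true
}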
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool                                                     { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value)                                   { s.report(true, reportByIgnore) }
+func (ignore) String() string                                                       { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+	if !vx.IsValid() || !vy.IsValid() {
+		return validator{}
+	}
+	if !vx.CanInterface() || !vy.CanInterface() {
+		return validator{}
+	}
+	return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+	// Implies missing slice element or map entry.
+	if !vx.IsValid() || !vy.IsValid() {
+		s.report(vx.IsValid() == vy.IsValid(), 0)
+		return
+	}
+
+	// Unable to Interface implies unexported field without visibility access.
+	if !vx.CanInterface() || !vy.CanInterface() {
+		help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+		var name string
+		if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+			// Named type with unexported fields.
+			name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+			isProtoMessage := func(t reflect.Type) bool {
+				m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+				return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+					m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+					m.Type.Out(0).Name() == "Message"
+			}
+			if isProtoMessage(t) {
+				help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+			} else if _, ok := reflect.New(t).Interface().(error); ok {
+				help = "consider using cmpopts.EquateErrors to compare error values"
+			} else if t.Comparable() {
+				help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+			}
+		} else {
+			// Unnamed type with unexported fields. Derive PkgPath from field.
+			var pkgPath string
+			for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+				pkgPath = t.Field(i).PkgPath
+			}
+			name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+		}
+		panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+	}
+
+	panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+		panic(fmt.Sprintf("invalid transformer function: %T", f))
+	}
+	if name == "" {
+		name = function.NameOf(v)
+		if !identsRx.MatchString(name) {
+			name = "λ" // Lambda-symbol as placeholder name
+		}
+	} else if !identsRx.MatchString(name) {
+		panic(fmt.Sprintf("invalid name: %q", name))
+	}
+	tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		tr.typ = ti
+	}
+	return tr
+}
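// Illustrative sketch, not part of the upstream file: a Transformer that compares
// []int values as multisets by transforming each slice into a sorted copy. The
// input is copied first because a transformer must not mutate its argument.
package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	sortedCopy := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...)
		sort.Ints(out)
		return out
	})
	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{2, 3, 1}, sortedCopy)) // true
}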
+
+type transformer struct {
+	core
+	name string
+	typ  reflect.Type  // T
+	fnc  reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+	for i := len(s.curPath) - 1; i >= 0; i-- {
+		if t, ok := s.curPath[i].(Transform); !ok {
+			break // Hit most recent non-Transform step
+		} else if tr == t.trans {
+			return nil // Cannot directly use same Transform
+		}
+	}
+	if tr.typ == nil || t.AssignableTo(tr.typ) {
+		return tr
+	}
+	return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+	step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+	vvx := s.callTRFunc(tr.fnc, vx, step)
+	vvy := s.callTRFunc(tr.fnc, vy, step)
+	step.vx, step.vy = vvx, vvy
+	s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+	return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+//   - Symmetric: equal(x, y) == equal(y, x)
+//   - Deterministic: equal(x, y) == equal(x, y)
+//   - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+		panic(fmt.Sprintf("invalid comparer function: %T", f))
+	}
+	cm := &comparer{fnc: v}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		cm.typ = ti
+	}
+	return cm
+}
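// Illustrative sketch, not part of the upstream file: a symmetric, deterministic,
// and pure Comparer that treats float64 values as equal when they differ by less
// than 1e-9. (cmpopts.EquateApprox provides a ready-made variant of this idea.)
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})
	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true
}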
+
+type comparer struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+	if cm.typ == nil || t.AssignableTo(cm.typ) {
+		return cm
+	}
+	return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+	eq := s.callTTBFunc(cm.fnc, vx, vy)
+	s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+	return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+	return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return exporter(func(t reflect.Type) bool { return m[t] })
+}
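
The same effect expressed with this older constructor, listing the struct types explicitly instead of supplying a predicate (illustrative; config is again a hypothetical type):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type config struct{ host string } // unexported field

func main() {
	opt := cmp.AllowUnexported(config{})
	fmt.Println(cmp.Equal(config{host: "a"}, config{host: "a"}, opt)) // true
}
```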
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+	_     [0]func() // Make Result incomparable
+	flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+	return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+	return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+	return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+	return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+	return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+	_ resultFlags = (1 << iota) / 2
+
+	reportEqual
+	reportUnequal
+	reportByIgnore
+	reportByMethod
+	reportByFunc
+	reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are

+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+	// PushStep is called when a tree-traversal operation is performed.
+	// The PathStep itself is only valid until the step is popped.
+	// The PathStep.Values are valid for the duration of the entire traversal
+	// and must not be mutated.
+	//
+	// Equal always calls PushStep at the start to provide an operation-less
+	// PathStep used to report the root values.
+	//
+	// Within a slice, the exact set of inserted, removed, or modified elements
+	// is unspecified and may change in future implementations.
+	// The entries of a map are iterated through in an unspecified order.
+	PushStep(PathStep)
+
+	// Report is called exactly once on leaf nodes to report whether the
+	// comparison identified the node as equal, unequal, or ignored.
+	// A leaf node is one that is immediately preceded by and followed by
+	// a pair of PushStep and PopStep calls.
+	Report(Result)
+
+	// PopStep ascends back up the value tree.
+	// There is always a matching pop call for every push call.
+	PopStep()
+}) Option {
+	return reporter{r}
+}
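
A sketch of a custom reporter wired through this option, following the pattern used in the package's own examples (illustrative; diffReporter is a made-up name):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the path and values of every unequal leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: -%v +%v", r.path, vx, vy))
	}
}

func main() {
	var r diffReporter
	x := map[string]int{"a": 1, "b": 2}
	y := map[string]int{"a": 1, "b": 3}
	cmp.Equal(x, y, cmp.Reporter(&r))
	fmt.Println(strings.Join(r.diffs, "\n")) // reports the "b" entry
}
```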
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+	PushStep(PathStep)
+	Report(Result)
+	PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+	for _, opt := range src {
+		switch opt := opt.(type) {
+		case nil:
+			continue
+		case Options:
+			dst = flattenOptions(dst, opt)
+		case coreOption:
+			dst = append(dst, opt)
+		default:
+			panic(fmt.Sprintf("invalid option type: %T", opt))
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c3c1456
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// this interface; values of these types are returned by this package.
+//
+// Implementations of this interface:
+//   - [StructField]
+//   - [SliceIndex]
+//   - [MapIndex]
+//   - [Indirect]
+//   - [TypeAssertion]
+//   - [Transform]
+type PathStep interface {
+	String() string
+
+	// Type is the resulting type after performing the path step.
+	Type() reflect.Type
+
+	// Values is the resulting values after performing the path step.
+	// The type of each valid value is guaranteed to be identical to Type.
+	//
+	// In some cases, one or both may be invalid or have restrictions:
+	//   - For StructField, both are not interface-able if the current field
+	//     is unexported and the struct type is not explicitly permitted by
+	//     an Exporter to traverse unexported fields.
+	//   - For SliceIndex, one may be invalid if an element is missing from
+	//     either the x or y slice.
+	//   - For MapIndex, one may be invalid if an entry is missing from
+	//     either the x or y map.
+	//
+	// The provided values must not be mutated.
+	Values() (vx, vy reflect.Value)
+}
+
+var (
+	_ PathStep = StructField{}
+	_ PathStep = SliceIndex{}
+	_ PathStep = MapIndex{}
+	_ PathStep = Indirect{}
+	_ PathStep = TypeAssertion{}
+	_ PathStep = Transform{}
+)
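
These steps are what a cmp.FilterPath predicate receives. A small sketch of filtering on the last step (illustrative; the Record type and Metadata field are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Record struct {
	ID       int
	Metadata map[string]string
}

func main() {
	// Ignore differences under any struct field named Metadata.
	ignoreMeta := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Metadata"
	}, cmp.Ignore())

	x := Record{ID: 1, Metadata: map[string]string{"a": "1"}}
	y := Record{ID: 1, Metadata: map[string]string{"a": "2"}}
	fmt.Println(cmp.Equal(x, y, ignoreMeta)) // true: Metadata is ignored
}
```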
+
+func (pa *Path) push(s PathStep) {
+	*pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+	*pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+	return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+	if i < 0 {
+		i = len(pa) + i
+	}
+	if i < 0 || i >= len(pa) {
+		return pathStep{}
+	}
+	return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+//	MyMap.MySlices.MyField
+func (pa Path) String() string {
+	var ss []string
+	for _, s := range pa {
+		if _, ok := s.(StructField); ok {
+			ss = append(ss, s.String())
+		}
+	}
+	return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+//	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+	var ssPre, ssPost []string
+	var numIndirect int
+	for i, s := range pa {
+		var nextStep PathStep
+		if i+1 < len(pa) {
+			nextStep = pa[i+1]
+		}
+		switch s := s.(type) {
+		case Indirect:
+			numIndirect++
+			pPre, pPost := "(", ")"
+			switch nextStep.(type) {
+			case Indirect:
+				continue // Next step is indirection, so let them batch up
+			case StructField:
+				numIndirect-- // Automatic indirection on struct fields
+			case nil:
+				pPre, pPost = "", "" // Last step; no need for parenthesis
+			}
+			if numIndirect > 0 {
+				ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+				ssPost = append(ssPost, pPost)
+			}
+			numIndirect = 0
+			continue
+		case Transform:
+			ssPre = append(ssPre, s.trans.name+"(")
+			ssPost = append(ssPost, ")")
+			continue
+		}
+		ssPost = append(ssPost, s.String())
+	}
+	for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+		ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+	}
+	return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+	typ    reflect.Type
+	vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type             { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+	if ps.typ == nil {
+		return "<nil>"
+	}
+	s := value.TypeString(ps.typ, false)
+	if s == "" || strings.ContainsAny(s, "{}\n") {
+		return "root" // Type too simple or complex to print
+	}
+	return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+	pathStep
+	name string
+	idx  int
+
+	// These fields are used for forcibly accessing an unexported field.
+	// pvx, pvy, and field are only valid if unexported is true.
+	unexported bool
+	mayForce   bool                // Forcibly allow visibility
+	paddr      bool                // Was parent addressable?
+	pvx, pvy   reflect.Value       // Parent values (always addressable)
+	field      reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+	if !sf.unexported {
+		return sf.vx, sf.vy // CanInterface reports true
+	}
+
+	// Forcibly obtain read-write access to an unexported struct field.
+	if sf.mayForce {
+		vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+		vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+		return vx, vy // CanInterface reports true
+	}
+	return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+	pathStep
+	xkey, ykey int
+	isSlice    bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type             { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+	switch {
+	case si.xkey == si.ykey:
+		return fmt.Sprintf("[%d]", si.xkey)
+	case si.ykey == -1:
+		// [5->?] means "I don't know where X[5] went"
+		return fmt.Sprintf("[%d->?]", si.xkey)
+	case si.xkey == -1:
+		// [?->3] means "I don't know where Y[3] came from"
+		return fmt.Sprintf("[?->%d]", si.ykey)
+	default:
+		// [5->3] means "X[5] moved to Y[3]"
+		return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+	}
+}
+
+// Key is the index key; it may return -1 if in a split state.
+func (si SliceIndex) Key() int {
+	if si.xkey != si.ykey {
+		return -1
+	}
+	return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+	pathStep
+	key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type             { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string                 { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+	pathStep
+}
+
+func (in Indirect) Type() reflect.Type             { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string                 { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+	pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type             { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string                 { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+	pathStep
+	trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type             { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string                 { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack, where descent into a
+// pointer pushes the address onto the stack and ascent from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare; we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly so with py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+	// mx is keyed by x pointers, where the value is the associated y pointer.
+	mx map[value.Pointer]value.Pointer
+	// my is keyed by y pointers, where the value is the associated x pointer.
+	my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+	p.mx = make(map[value.Pointer]value.Pointer)
+	p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
+// and be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+	px := value.PointerOf(vx)
+	py := value.PointerOf(vy)
+	_, ok1 := p.mx[px]
+	_, ok2 := p.my[py]
+	if ok1 || ok2 {
+		equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+		return equal, true
+	}
+	p.mx[px] = py
+	p.my[py] = px
+	return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+	delete(p.mx, value.PointerOf(vx))
+	delete(p.my, value.PointerOf(vy))
+}
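
The dual-map bookkeeping above can be modeled in isolation. A conceptual sketch, not the internal implementation (node and cyclePairs are made-up names, and plain pointers stand in for value.Pointer):

```go
package main

import "fmt"

type node struct{ next *node }

// cyclePairs pairs the addresses of x and y as they are visited: a revisit
// is an "equal" cycle only if both sides were previously seen together.
type cyclePairs struct {
	mx, my map[*node]*node
}

func (c *cyclePairs) push(x, y *node) (equal, visited bool) {
	if _, ok := c.mx[x]; ok {
		return c.mx[x] == y && c.my[y] == x, true
	}
	if _, ok := c.my[y]; ok {
		return false, true
	}
	c.mx[x], c.my[y] = y, x
	return false, false
}

func main() {
	x := &node{}
	x.next = x // self-cycle
	y := &node{}
	y.next = y // self-cycle
	c := &cyclePairs{mx: map[*node]*node{}, my: map[*node]*node{}}
	c.push(x, y)              // first visit: record the pair
	fmt.Println(c.push(x, y)) // true true: the same pair revisited together
}
```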
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+	r, _ := utf8.DecodeRuneInString(id)
+	return unicode.IsUpper(r)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 0000000..f43cd12
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+	root *valueNode
+	curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+	r.curr = r.curr.PushStep(ps)
+	if r.root == nil {
+		r.root = r.curr
+	}
+}
+func (r *defaultReporter) Report(rs Result) {
+	r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+	r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+	assert(r.root != nil && r.curr == nil)
+	if r.root.NumDiff == 0 {
+		return ""
+	}
+	ptrs := new(pointerReferences)
+	text := formatOptions{}.FormatDiff(r.root, ptrs)
+	resolveReferences(text)
+	return text.String()
+}
+
+func assert(ok bool) {
+	if !ok {
+		panic("assertion failure")
+	}
+}
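
This defaultReporter is what ultimately backs cmp.Diff. The typical test-side pattern looks like the following sketch (illustrative; the package name is hypothetical, and the values differ on purpose so the report is non-empty):

```go
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

func TestValues(t *testing.T) {
	want := map[string]int{"a": 1, "b": 2}
	got := map[string]int{"a": 1, "b": 3}
	// Diff returns "" when the values are equal; otherwise it returns the
	// report assembled by the reporter machinery above.
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("values mismatch (-want +got):\n%s", diff)
	}
}
```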
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 0000000..2050bf6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+	diffUnknown   diffMode = 0
+	diffIdentical diffMode = ' '
+	diffRemoved   diffMode = '-'
+	diffInserted  diffMode = '+'
+)
+
+type typeMode int
+
+const (
+	// emitType always prints the type.
+	emitType typeMode = iota
+	// elideType never prints the type.
+	elideType
+	// autoType prints the type only for composite kinds
+	// (i.e., structs, slices, arrays, and maps).
+	autoType
+)
+
+type formatOptions struct {
+	// DiffMode controls the output mode of FormatDiff.
+	//
+	// If diffUnknown,   then produce a diff of the x and y values.
+	// If diffIdentical, then emit values as if they were equal.
+	// If diffRemoved,   then only emit x values (ignoring y values).
+	// If diffInserted,  then only emit y values (ignoring x values).
+	DiffMode diffMode
+
+	// TypeMode controls whether to print the type for the current node.
+	//
+	// As a general rule of thumb, we always print the type of the next node
+	// after an interface, and always elide the type of the next node after
+	// a slice or map node.
+	TypeMode typeMode
+
+	// formatValueOptions are options specific to printing reflect.Values.
+	formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+	opts.DiffMode = d
+	return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+	opts.TypeMode = t
+	return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+	opts.VerbosityLevel = level
+	opts.LimitVerbosity = true
+	return opts
+}
+func (opts formatOptions) verbosity() uint {
+	switch {
+	case opts.VerbosityLevel < 0:
+		return 0
+	case opts.VerbosityLevel > 16:
+		return 16 // some reasonable maximum to avoid shift overflow
+	default:
+		return uint(opts.VerbosityLevel)
+	}
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+	opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+	if i > 0 {
+		opts.AvoidStringer = true
+	}
+	if i >= maxVerbosityPreset {
+		opts.PrintAddresses = true
+		opts.QualifiedNames = true
+	}
+	return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+	if opts.DiffMode == diffIdentical {
+		opts = opts.WithVerbosity(1)
+	} else if opts.verbosity() < 3 {
+		opts = opts.WithVerbosity(3)
+	}
+
+	// Check whether we have specialized formatting for this node.
+	// This is not necessary, but helpful for producing more readable outputs.
+	if opts.CanFormatDiffSlice(v) {
+		return opts.FormatDiffSlice(v)
+	}
+
+	var parentKind reflect.Kind
+	if v.parent != nil && v.parent.TransformerName == "" {
+		parentKind = v.parent.Type.Kind()
+	}
+
+	// For leaf nodes, format the value based on the reflect.Values alone.
+	// As a special case, treat equal []byte as leaf nodes.
+	isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+	isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+	if v.MaxDepth == 0 || isEqualBytes {
+		switch opts.DiffMode {
+		case diffUnknown, diffIdentical:
+			// Format Equal.
+			if v.NumDiff == 0 {
+				outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+				outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+				if v.NumIgnored > 0 && v.NumSame == 0 {
+					return textEllipsis
+				} else if outx.Len() < outy.Len() {
+					return outx
+				} else {
+					return outy
+				}
+			}
+
+			// Format unequal.
+			assert(opts.DiffMode == diffUnknown)
+			var list textList
+			outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+			outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+			for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+				opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+				outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+				outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+			}
+			if outx != nil {
+				list = append(list, textRecord{Diff: '-', Value: outx})
+			}
+			if outy != nil {
+				list = append(list, textRecord{Diff: '+', Value: outy})
+			}
+			return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+		case diffRemoved:
+			return opts.FormatValue(v.ValueX, parentKind, ptrs)
+		case diffInserted:
+			return opts.FormatValue(v.ValueY, parentKind, ptrs)
+		default:
+			panic("invalid diff mode")
+		}
+	}
+
+	// Register slice element to support cycle detection.
+	if parentKind == reflect.Slice {
+		ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+		defer ptrs.Pop()
+		defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+	}
+
+	// Descend into the child value node.
+	if v.TransformerName != "" {
+		out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+		out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+		return opts.FormatType(v.Type, out)
+	} else {
+		switch k := v.Type.Kind(); k {
+		case reflect.Struct, reflect.Array, reflect.Slice:
+			out = opts.formatDiffList(v.Records, k, ptrs)
+			out = opts.FormatType(v.Type, out)
+		case reflect.Map:
+			// Register map to support cycle detection.
+			ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+			defer ptrs.Pop()
+
+			out = opts.formatDiffList(v.Records, k, ptrs)
+			out = wrapTrunkReferences(ptrRefs, out)
+			out = opts.FormatType(v.Type, out)
+		case reflect.Ptr:
+			// Register pointer to support cycle detection.
+			ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+			defer ptrs.Pop()
+
+			out = opts.FormatDiff(v.Value, ptrs)
+			out = wrapTrunkReferences(ptrRefs, out)
+			out = &textWrap{Prefix: "&", Value: out}
+		case reflect.Interface:
+			out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+		default:
+			panic(fmt.Sprintf("%v cannot have children", k))
+		}
+		return out
+	}
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+	// Derive record name based on the data structure kind.
+	var name string
+	var formatKey func(reflect.Value) string
+	switch k {
+	case reflect.Struct:
+		name = "field"
+		opts = opts.WithTypeMode(autoType)
+		formatKey = func(v reflect.Value) string { return v.String() }
+	case reflect.Slice, reflect.Array:
+		name = "element"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = func(reflect.Value) string { return "" }
+	case reflect.Map:
+		name = "entry"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+	}
+
+	maxLen := -1
+	if opts.LimitVerbosity {
+		if opts.DiffMode == diffIdentical {
+			maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+		} else {
+			maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+		}
+		opts.VerbosityLevel--
+	}
+
+	// Handle unification.
+	switch opts.DiffMode {
+	case diffIdentical, diffRemoved, diffInserted:
+		var list textList
+		var deferredEllipsis bool // Add final "..." to indicate records were dropped
+		for _, r := range recs {
+			if len(list) == maxLen {
+				deferredEllipsis = true
+				break
+			}
+
+			// Elide struct fields that are zero value.
+			if k == reflect.Struct {
+				var isZero bool
+				switch opts.DiffMode {
+				case diffIdentical:
+					isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+				case diffRemoved:
+					isZero = r.Value.ValueX.IsZero()
+				case diffInserted:
+					isZero = r.Value.ValueY.IsZero()
+				}
+				if isZero {
+					continue
+				}
+			}
+			// Elide ignored nodes.
+			if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+				deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+				if !deferredEllipsis {
+					list.AppendEllipsis(diffStats{})
+				}
+				continue
+			}
+			if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+		}
+		if deferredEllipsis {
+			list.AppendEllipsis(diffStats{})
+		}
+		return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+	case diffUnknown:
+	default:
+		panic("invalid diff mode")
+	}
+
+	// Handle differencing.
+	var numDiffs int
+	var list textList
+	var keys []reflect.Value // invariant: len(list) == len(keys)
+	groups := coalesceAdjacentRecords(name, recs)
+	maxGroup := diffStats{Name: name}
+	for i, ds := range groups {
+		if maxLen >= 0 && numDiffs >= maxLen {
+			maxGroup = maxGroup.Append(ds)
+			continue
+		}
+
+		// Handle equal records.
+		if ds.NumDiff() == 0 {
+			// Compute the number of leading and trailing records to print.
+			var numLo, numHi int
+			numEqual := ds.NumIgnored + ds.NumIdentical
+			for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+				if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+					break
+				}
+				numLo++
+			}
+			for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+				if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+					break
+				}
+				numHi++
+			}
+			if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+				numHi++ // Avoid pointless coalescing of a single equal record
+			}
+
+			// Format the equal values.
+			for _, r := range recs[:numLo] {
+				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+				keys = append(keys, r.Key)
+			}
+			if numEqual > numLo+numHi {
+				ds.NumIdentical -= numLo + numHi
+				list.AppendEllipsis(ds)
+				for len(keys) < len(list) {
+					keys = append(keys, reflect.Value{})
+				}
+			}
+			for _, r := range recs[numEqual-numHi : numEqual] {
+				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+				keys = append(keys, r.Key)
+			}
+			recs = recs[numEqual:]
+			continue
+		}
+
+		// Handle unequal records.
+		for _, r := range recs[:ds.NumDiff()] {
+			switch {
+			case opts.CanFormatDiffSlice(r.Value):
+				out := opts.FormatDiffSlice(r.Value)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+				keys = append(keys, r.Key)
+			case r.Value.NumChildren == r.Value.MaxDepth:
+				outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+				outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+				for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+					opts2 := verbosityPreset(opts, i)
+					outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+					outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+				}
+				if outx != nil {
+					list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+					keys = append(keys, r.Key)
+				}
+				if outy != nil {
+					list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+					keys = append(keys, r.Key)
+				}
+			default:
+				out := opts.FormatDiff(r.Value, ptrs)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+				keys = append(keys, r.Key)
+			}
+		}
+		recs = recs[ds.NumDiff():]
+		numDiffs += ds.NumDiff()
+	}
+	if maxGroup.IsZero() {
+		assert(len(recs) == 0)
+	} else {
+		list.AppendEllipsis(maxGroup)
+		for len(keys) < len(list) {
+			keys = append(keys, reflect.Value{})
+		}
+	}
+	assert(len(list) == len(keys))
+
+	// For maps, the default formatting logic uses fmt.Stringer which may
+	// produce ambiguous output. Avoid calling String to disambiguate.
+	if k == reflect.Map {
+		var ambiguous bool
+		seenKeys := map[string]reflect.Value{}
+		for i, currKey := range keys {
+			if currKey.IsValid() {
+				strKey := list[i].Key
+				prevKey, seen := seenKeys[strKey]
+				if seen && prevKey.CanInterface() && currKey.CanInterface() {
+					ambiguous = prevKey.Interface() != currKey.Interface()
+					if ambiguous {
+						break
+					}
+				}
+				seenKeys[strKey] = currKey
+			}
+		}
+		if ambiguous {
+			for i, k := range keys {
+				if k.IsValid() {
+					list[i].Key = formatMapKey(k, true, ptrs)
+				}
+			}
+		}
+	}
+
+	return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+	var prevCase int // Arbitrary index into which case last occurred
+	lastStats := func(i int) *diffStats {
+		if prevCase != i {
+			groups = append(groups, diffStats{Name: name})
+			prevCase = i
+		}
+		return &groups[len(groups)-1]
+	}
+	for _, r := range recs {
+		switch rv := r.Value; {
+		case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+			lastStats(1).NumIgnored++
+		case rv.NumDiff == 0:
+			lastStats(1).NumIdentical++
+		case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+			lastStats(2).NumRemoved++
+		case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+			lastStats(2).NumInserted++
+		default:
+			lastStats(2).NumModified++
+		}
+	}
+	return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 0000000..be31b33
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/flags"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+	pointerDelimPrefix = "⟪"
+	pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+	v := p.Uintptr()
+	if flags.Deterministic {
+		v = 0xdeadf00f // Only used for stable testing purposes
+	}
+	if withDelims {
+		return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+	}
+	return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+	if deref && vx.IsValid() {
+		vx = vx.Addr()
+	}
+	if deref && vy.IsValid() {
+		vy = vy.Addr()
+	}
+	switch d {
+	case diffUnknown, diffIdentical:
+		pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+	case diffRemoved:
+		pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+	case diffInserted:
+		pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+	}
+	*ps = append(*ps, pp)
+	return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+	p = value.PointerOf(v)
+	for _, pp := range *ps {
+		if p == pp[0] || p == pp[1] {
+			return p, true
+		}
+	}
+	*ps = append(*ps, [2]value.Pointer{p, p})
+	return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+	*ps = (*ps)[:len(*ps)-1]
+}
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+	switch {
+	case pp[0].IsNil():
+		return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+	case pp[1].IsNil():
+		return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+	case pp[0] == pp[1]:
+		return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+	default:
+		return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+	}
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+	var prefix string
+	if printAddress {
+		prefix = formatPointer(p, true)
+	}
+	return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+	out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+	var prefix string
+	if printAddress {
+		prefix = formatPointer(p, true)
+	}
+	return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+	var walkNodes func(textNode, func(textNode))
+	walkNodes = func(s textNode, f func(textNode)) {
+		f(s)
+		switch s := s.(type) {
+		case *textWrap:
+			walkNodes(s.Value, f)
+		case textList:
+			for _, r := range s {
+				walkNodes(r.Value, f)
+			}
+		}
+	}
+
+	// Collect all trunks and leaves with reference metadata.
+	var trunks, leaves []*textWrap
+	walkNodes(s, func(s textNode) {
+		if s, ok := s.(*textWrap); ok {
+			switch s.Metadata.(type) {
+			case leafReference:
+				leaves = append(leaves, s)
+			case trunkReference, trunkReferences:
+				trunks = append(trunks, s)
+			}
+		}
+	})
+
+	// No leaf references to resolve.
+	if len(leaves) == 0 {
+		return
+	}
+
+	// Collect the set of all leaf references to resolve.
+	leafPtrs := make(map[value.Pointer]bool)
+	for _, leaf := range leaves {
+		leafPtrs[leaf.Metadata.(leafReference).p] = true
+	}
+
+	// Collect the set of trunk pointers that are always paired together.
+	// This allows us to assign a single ID to both pointers for brevity.
+	// If a pointer in a pair ever occurs by itself or as a different pair,
+	// then the pair is broken.
+	pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+	unpair := func(p value.Pointer) {
+		if !pairedTrunkPtrs[p].IsNil() {
+			pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+		}
+		pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+	}
+	for _, trunk := range trunks {
+		switch p := trunk.Metadata.(type) {
+		case trunkReference:
+			unpair(p.p) // standalone pointer cannot be part of a pair
+		case trunkReferences:
+			p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+			p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+			switch {
+			case !ok0 && !ok1:
+				// Register the newly seen pair.
+				pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+				pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+			case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+				// Exact pair already seen; do nothing.
+			default:
+				// Pair conflicts with some other pair; break all pairs.
+				unpair(p.pp[0])
+				unpair(p.pp[1])
+			}
+		}
+	}
+
+	// Correlate each pointer referenced by leaves to a unique identifier,
+	// and print the IDs for each trunk that matches those pointers.
+	var nextID uint
+	ptrIDs := make(map[value.Pointer]uint)
+	newID := func() uint {
+		id := nextID
+		nextID++
+		return id
+	}
+	for _, trunk := range trunks {
+		switch p := trunk.Metadata.(type) {
+		case trunkReference:
+			if print := leafPtrs[p.p]; print {
+				id, ok := ptrIDs[p.p]
+				if !ok {
+					id = newID()
+					ptrIDs[p.p] = id
+				}
+				trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+			}
+		case trunkReferences:
+			print0 := leafPtrs[p.pp[0]]
+			print1 := leafPtrs[p.pp[1]]
+			if print0 || print1 {
+				id0, ok0 := ptrIDs[p.pp[0]]
+				id1, ok1 := ptrIDs[p.pp[1]]
+				isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+				if isPair {
+					var id uint
+					assert(ok0 == ok1) // must be seen together or not at all
+					if ok0 {
+						assert(id0 == id1) // must have the same ID
+						id = id0
+					} else {
+						id = newID()
+						ptrIDs[p.pp[0]] = id
+						ptrIDs[p.pp[1]] = id
+					}
+					trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+				} else {
+					if print0 && !ok0 {
+						id0 = newID()
+						ptrIDs[p.pp[0]] = id0
+					}
+					if print1 && !ok1 {
+						id1 = newID()
+						ptrIDs[p.pp[1]] = id1
+					}
+					switch {
+					case print0 && print1:
+						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+					case print0:
+						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+					case print1:
+						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+					}
+				}
+			}
+		}
+	}
+
+	// Update all leaf references with the unique identifier.
+	for _, leaf := range leaves {
+		if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+			leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+		}
+	}
+}
+
+func formatReference(id uint) string {
+	return fmt.Sprintf("ref#%d", id)
+}
+
+func updateReferencePrefix(prefix, ref string) string {
+	if prefix == "" {
+		return pointerDelimPrefix + ref + pointerDelimSuffix
+	}
+	suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+	return pointerDelimPrefix + ref + ": " + suffix
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 0000000..e39f422
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+	anyType    = reflect.TypeOf((*interface{})(nil)).Elem()
+	stringType = reflect.TypeOf((*string)(nil)).Elem()
+	bytesType  = reflect.TypeOf((*[]byte)(nil)).Elem()
+	byteType   = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+	// AvoidStringer controls whether to avoid calling custom stringer
+	// methods like error.Error or fmt.Stringer.String.
+	AvoidStringer bool
+
+	// PrintAddresses controls whether to print the address of all pointers,
+	// slice elements, and maps.
+	PrintAddresses bool
+
+	// QualifiedNames controls whether FormatType uses the fully qualified name
+	// (including the full package path as opposed to just the package name).
+	QualifiedNames bool
+
+	// VerbosityLevel controls the amount of output to produce.
+	// A higher value produces more output. A value of zero or lower produces
+	// no output (represented using an ellipsis).
+	// If LimitVerbosity is false, then the level is treated as infinite.
+	VerbosityLevel int
+
+	// LimitVerbosity specifies that formatting should respect VerbosityLevel.
+	LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+	// Check whether to emit the type or not.
+	switch opts.TypeMode {
+	case autoType:
+		switch t.Kind() {
+		case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+			if s.Equal(textNil) {
+				return s
+			}
+		default:
+			return s
+		}
+		if opts.DiffMode == diffIdentical {
+			return s // elide type for identical nodes
+		}
+	case elideType:
+		return s
+	}
+
+	// Determine the type label, applying special handling for unnamed types.
+	typeName := value.TypeString(t, opts.QualifiedNames)
+	if t.Name() == "" {
+		// According to Go grammar, certain type literals contain symbols that
+		// do not strongly bind to the next lexicographical token (e.g., *T).
+		switch t.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Ptr:
+			typeName = "(" + typeName + ")"
+		}
+	}
+	return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+	var refNode *textWrap
+	if s2, ok := s.(*textWrap); ok {
+		// Unwrap a single pointer reference node.
+		switch s2.Metadata.(type) {
+		case leafReference, trunkReference, trunkReferences:
+			refNode = s2
+			if s3, ok := refNode.Value.(*textWrap); ok {
+				s2 = s3
+			}
+		}
+
+		// Already has delimiters that make parentheses unnecessary.
+		hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+		hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+		if hasParens || hasBraces {
+			return s
+		}
+	}
+	if refNode != nil {
+		refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+		return s
+	}
+	return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+	if !v.IsValid() {
+		return nil
+	}
+	t := v.Type()
+
+	// Check slice element for cycles.
+	if parentKind == reflect.Slice {
+		ptrRef, visited := ptrs.Push(v.Addr())
+		if visited {
+			return makeLeafReference(ptrRef, false)
+		}
+		defer ptrs.Pop()
+		defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+	}
+
+	// Check whether there is an Error or String method to call.
+	if !opts.AvoidStringer && v.CanInterface() {
+		// Avoid calling Error or String methods on nil receivers since many
+		// implementations crash when doing so.
+		if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+			var prefix, strVal string
+			func() {
+				// Swallow and ignore any panics from String or Error.
+				defer func() { recover() }()
+				switch v := v.Interface().(type) {
+				case error:
+					strVal = v.Error()
+					prefix = "e"
+				case fmt.Stringer:
+					strVal = v.String()
+					prefix = "s"
+				}
+			}()
+			if prefix != "" {
+				return opts.formatString(prefix, strVal)
+			}
+		}
+	}
+
+	// Check whether to explicitly wrap the result with the type.
+	var skipType bool
+	defer func() {
+		if !skipType {
+			out = opts.FormatType(t, out)
+		}
+	}()
+
+	switch t.Kind() {
+	case reflect.Bool:
+		return textLine(fmt.Sprint(v.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return textLine(fmt.Sprint(v.Int()))
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return textLine(fmt.Sprint(v.Uint()))
+	case reflect.Uint8:
+		if parentKind == reflect.Slice || parentKind == reflect.Array {
+			return textLine(formatHex(v.Uint()))
+		}
+		return textLine(fmt.Sprint(v.Uint()))
+	case reflect.Uintptr:
+		return textLine(formatHex(v.Uint()))
+	case reflect.Float32, reflect.Float64:
+		return textLine(fmt.Sprint(v.Float()))
+	case reflect.Complex64, reflect.Complex128:
+		return textLine(fmt.Sprint(v.Complex()))
+	case reflect.String:
+		return opts.formatString("", v.String())
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		return textLine(formatPointer(value.PointerOf(v), true))
+	case reflect.Struct:
+		var list textList
+		v := makeAddressable(v) // needed for retrieveUnexportedField
+		maxLen := v.NumField()
+		if opts.LimitVerbosity {
+			maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+			opts.VerbosityLevel--
+		}
+		for i := 0; i < v.NumField(); i++ {
+			vv := v.Field(i)
+			if vv.IsZero() {
+				continue // Elide fields with zero values
+			}
+			if len(list) == maxLen {
+				list.AppendEllipsis(diffStats{})
+				break
+			}
+			sf := t.Field(i)
+			if !isExported(sf.Name) {
+				vv = retrieveUnexportedField(v, sf, true)
+			}
+			s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+			list = append(list, textRecord{Key: sf.Name, Value: s})
+		}
+		return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+	case reflect.Slice:
+		if v.IsNil() {
+			return textNil
+		}
+
+		// Check whether this is a []byte of text data.
+		if t.Elem() == byteType {
+			b := v.Bytes()
+			isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+			if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+				out = opts.formatString("", string(b))
+				skipType = true
+				return opts.FormatType(t, out)
+			}
+		}
+
+		fallthrough
+	case reflect.Array:
+		maxLen := v.Len()
+		if opts.LimitVerbosity {
+			maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+			opts.VerbosityLevel--
+		}
+		var list textList
+		for i := 0; i < v.Len(); i++ {
+			if len(list) == maxLen {
+				list.AppendEllipsis(diffStats{})
+				break
+			}
+			s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+			list = append(list, textRecord{Value: s})
+		}
+
+		out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+		if t.Kind() == reflect.Slice && opts.PrintAddresses {
+			header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+			out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+		}
+		return out
+	case reflect.Map:
+		if v.IsNil() {
+			return textNil
+		}
+
+		// Check pointer for cycles.
+		ptrRef, visited := ptrs.Push(v)
+		if visited {
+			return makeLeafReference(ptrRef, opts.PrintAddresses)
+		}
+		defer ptrs.Pop()
+
+		maxLen := v.Len()
+		if opts.LimitVerbosity {
+			maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+			opts.VerbosityLevel--
+		}
+		var list textList
+		for _, k := range value.SortKeys(v.MapKeys()) {
+			if len(list) == maxLen {
+				list.AppendEllipsis(diffStats{})
+				break
+			}
+			sk := formatMapKey(k, false, ptrs)
+			sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+			list = append(list, textRecord{Key: sk, Value: sv})
+		}
+
+		out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+		out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+		return out
+	case reflect.Ptr:
+		if v.IsNil() {
+			return textNil
+		}
+
+		// Check pointer for cycles.
+		ptrRef, visited := ptrs.Push(v)
+		if visited {
+			out = makeLeafReference(ptrRef, opts.PrintAddresses)
+			return &textWrap{Prefix: "&", Value: out}
+		}
+		defer ptrs.Pop()
+
+		// Skip the name only if this is an unnamed pointer type.
+		// Otherwise taking the address of a value does not reproduce
+		// the named pointer type.
+		if v.Type().Name() == "" {
+			skipType = true // Let the underlying value print the type instead
+		}
+		out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+		out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+		out = &textWrap{Prefix: "&", Value: out}
+		return out
+	case reflect.Interface:
+		if v.IsNil() {
+			return textNil
+		}
+		// Interfaces accept different concrete types,
+		// so configure the underlying value to explicitly print the type.
+		return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+	default:
+		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+	}
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+	maxLen := len(s)
+	maxLines := strings.Count(s, "\n") + 1
+	if opts.LimitVerbosity {
+		maxLen = (1 << opts.verbosity()) << 5   // 32, 64, 128, 256, etc...
+		maxLines = (1 << opts.verbosity()) << 2 //  4, 8, 16, 32, 64, etc...
+	}
+
+	// For multiline strings, use the triple-quote syntax,
+	// but only use it when printing removed or inserted nodes since
+	// we only want the extra verbosity for those cases.
+	lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+	isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+	for i := 0; i < len(lines) && isTripleQuoted; i++ {
+		lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+		isPrintable := func(r rune) bool {
+			return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+		}
+		line := lines[i]
+		isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+	}
+	if isTripleQuoted {
+		var list textList
+		list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+		for i, line := range lines {
+			if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+				comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+				list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+				break
+			}
+			list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+		}
+		list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+		return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+	}
+
+	// Format the string as a single-line quoted string.
+	if len(s) > maxLen+len(textEllipsis) {
+		return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+	}
+	return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+	var opts formatOptions
+	opts.DiffMode = diffIdentical
+	opts.TypeMode = elideType
+	opts.PrintAddresses = disambiguate
+	opts.AvoidStringer = disambiguate
+	opts.QualifiedNames = disambiguate
+	opts.VerbosityLevel = maxVerbosityPreset
+	opts.LimitVerbosity = true
+	s := opts.FormatValue(v, reflect.Map, ptrs).String()
+	return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+	// Use the quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+	}
+	if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
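
The decision rule above can be exercised standalone. A sketch mirroring the same logic outside the cmp package (illustrative; quoteLikeCmp is a made-up name):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// quoteLikeCmp prefers strconv.Quote when quoting adds no escape characters,
// otherwise it falls back to a backquoted raw literal when that form is legal.
func quoteLikeCmp(s string) string {
	qs := strconv.Quote(s)
	if len(qs) == 1+len(s)+1 {
		return qs
	}
	rawInvalid := func(r rune) bool {
		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
	}
	if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
		return "`" + s + "`"
	}
	return qs
}

func main() {
	fmt.Println(quoteLikeCmp("hello"))        // "hello" (plain quoting adds no escapes)
	fmt.Println(quoteLikeCmp(`say "hi"`))     // backquoted form avoids escaping quotes
	fmt.Println(quoteLikeCmp("line1\nline2")) // quoted form; newline disallows raw
}
```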
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+	var f string
+	switch {
+	case u <= 0xff:
+		f = "0x%02x"
+	case u <= 0xffff:
+		f = "0x%04x"
+	case u <= 0xffffff:
+		f = "0x%06x"
+	case u <= 0xffffffff:
+		f = "0x%08x"
+	case u <= 0xffffffffff:
+		f = "0x%010x"
+	case u <= 0xffffffffffff:
+		f = "0x%012x"
+	case u <= 0xffffffffffffff:
+		f = "0x%014x"
+	case u <= 0xffffffffffffffff:
+		f = "0x%016x"
+	}
+	return fmt.Sprintf(f, u)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 0000000..23e444f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+	switch {
+	case opts.DiffMode != diffUnknown:
+		return false // Must be formatting in diff mode
+	case v.NumDiff == 0:
+		return false // No differences detected
+	case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+		return false // Both values must be valid
+	case v.NumIgnored > 0:
+		return false // Some ignore option was used
+	case v.NumTransformed > 0:
+		return false // Some transform option was used
+	case v.NumCompared > 1:
+		return false // More than one comparison was used
+	case v.NumCompared == 1 && v.Type.Name() != "":
+		// The need for cmp to check applicability of options on every element
+		// in a slice is a significant performance detriment for large []byte.
+		// The workaround is to specify Comparer(bytes.Equal),
+		// which enables cmp to compare []byte more efficiently.
+		// If they differ, we still want to provide batched diffing.
+		// The logic disallows named types since they tend to have their own
+		// String method, with nicer formatting than what this provides.
+		return false
+	}
+
+	// Check whether this is an interface with the same concrete types.
+	t := v.Type
+	vx, vy := v.ValueX, v.ValueY
+	if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+		vx, vy = vx.Elem(), vy.Elem()
+		t = vx.Type()
+	}
+
+	// Check whether we provide specialized diffing for this type.
+	switch t.Kind() {
+	case reflect.String:
+	case reflect.Array, reflect.Slice:
+		// Only slices of primitive types have specialized handling.
+		switch t.Elem().Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+			reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		default:
+			return false
+		}
+
+		// Both slice values have to be non-empty.
+		if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+			return false
+		}
+
+		// If a sufficient number of elements already differ,
+		// use specialized formatting even if length requirement is not met.
+		if v.NumDiff > v.NumSame {
+			return true
+		}
+	default:
+		return false
+	}
+
+	// Use specialized string diffing for longer slices or strings.
+	const minLength = 32
+	return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+	assert(opts.DiffMode == diffUnknown)
+	t, vx, vy := v.Type, v.ValueX, v.ValueY
+	if t.Kind() == reflect.Interface {
+		vx, vy = vx.Elem(), vy.Elem()
+		t = vx.Type()
+		opts = opts.WithTypeMode(emitType)
+	}
+
+	// Auto-detect the type of the data.
+	var sx, sy string
+	var ssx, ssy []string
+	var isString, isMostlyText, isPureLinedText, isBinary bool
+	switch {
+	case t.Kind() == reflect.String:
+		sx, sy = vx.String(), vy.String()
+		isString = true
+	case t.Kind() == reflect.Slice && t.Elem() == byteType:
+		sx, sy = string(vx.Bytes()), string(vy.Bytes())
+		isString = true
+	case t.Kind() == reflect.Array:
+		// Arrays need to be addressable for slice operations to work.
+		vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+		vx2.Set(vx)
+		vy2.Set(vy)
+		vx, vy = vx2, vy2
+	}
+	if isString {
+		var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+		for i, r := range sx + sy {
+			numTotalRunes++
+			if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+				numValidRunes++
+			}
+			if r == '\n' {
+				if maxLineLen < i-lastLineIdx {
+					maxLineLen = i - lastLineIdx
+				}
+				lastLineIdx = i + 1
+				numLines++
+			}
+		}
+		isPureText := numValidRunes == numTotalRunes
+		isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+		isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+		isBinary = !isMostlyText
+
+		// Avoid diffing by lines if it produces a significantly more complex
+		// edit script than diffing by bytes.
+		if isPureLinedText {
+			ssx = strings.Split(sx, "\n")
+			ssy = strings.Split(sy, "\n")
+			esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+				return diff.BoolResult(ssx[ix] == ssy[iy])
+			})
+			esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+				return diff.BoolResult(sx[ix] == sy[iy])
+			})
+			efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+			efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+			quotedLength := len(strconv.Quote(sx + sy))
+			unquotedLength := len(sx) + len(sy)
+			escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+			isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+		}
+	}
+
+	// Format the string into printable records.
+	var list textList
+	var delim string
+	switch {
+	// If the text appears to be multi-lined text,
+	// then perform differencing across individual lines.
+	case isPureLinedText:
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.Index(0).String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = "\n"
+
+		// If possible, use a custom triple-quote (""") syntax for printing
+		// differences in a string literal. This format is more readable,
+		// but has edge-cases where differences are visually indistinguishable.
+		// This format is avoided under the following conditions:
+		//   - A line starts with `"""`
+		//   - A line starts with "..."
+		//   - A line contains non-printable characters
+		//   - Adjacent different lines differ only by whitespace
+		//
+		// For example:
+		//
+		//		"""
+		//		... // 3 identical lines
+		//		foo
+		//		bar
+		//	-	baz
+		//	+	BAZ
+		//		"""
+		isTripleQuoted := true
+		prevRemoveLines := map[string]bool{}
+		prevInsertLines := map[string]bool{}
+		var list2 textList
+		list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+		for _, r := range list {
+			if !r.Value.Equal(textEllipsis) {
+				line, _ := strconv.Unquote(string(r.Value.(textLine)))
+				line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+				normLine := strings.Map(func(r rune) rune {
+					if unicode.IsSpace(r) {
+						return -1 // drop whitespace to avoid visually indistinguishable output
+					}
+					return r
+				}, line)
+				isPrintable := func(r rune) bool {
+					return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+				}
+				isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+				switch r.Diff {
+				case diffRemoved:
+					isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+					prevRemoveLines[normLine] = true
+				case diffInserted:
+					isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+					prevInsertLines[normLine] = true
+				}
+				if !isTripleQuoted {
+					break
+				}
+				r.Value = textLine(line)
+				r.ElideComma = true
+			}
+			if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+				prevRemoveLines = map[string]bool{}
+				prevInsertLines = map[string]bool{}
+			}
+			list2 = append(list2, r)
+		}
+		if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+			list2 = list2[:len(list2)-1] // elide single empty line at the end
+		}
+		list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+		if isTripleQuoted {
+			var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+			switch t.Kind() {
+			case reflect.String:
+				if t != stringType {
+					out = opts.FormatType(t, out)
+				}
+			case reflect.Slice:
+				// Always emit type for slices since the triple-quote syntax
+				// looks like a string (not a slice).
+				opts = opts.WithTypeMode(emitType)
+				out = opts.FormatType(t, out)
+			}
+			return out
+		}
+
+	// If the text appears to be single-lined text,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is printed as quoted strings.
+	case isMostlyText:
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+
+	// If the text appears to be binary data,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is inspired by hexdump.
+	case isBinary:
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+			func(v reflect.Value, d diffMode) textRecord {
+				var ss []string
+				for i := 0; i < v.Len(); i++ {
+					ss = append(ss, formatHex(v.Index(i).Uint()))
+				}
+				s := strings.Join(ss, ", ")
+				comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+				return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+			},
+		)
+
+	// For all other slices of primitive types,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The size of each chunk depends on the width of the element kind.
+	default:
+		var chunkSize int
+		if t.Elem().Kind() == reflect.Bool {
+			chunkSize = 16
+		} else {
+			switch t.Elem().Bits() {
+			case 8:
+				chunkSize = 16
+			case 16:
+				chunkSize = 12
+			case 32:
+				chunkSize = 8
+			default:
+				chunkSize = 8
+			}
+		}
+		list = opts.formatDiffSlice(
+			vx, vy, chunkSize, t.Elem().Kind().String(),
+			func(v reflect.Value, d diffMode) textRecord {
+				var ss []string
+				for i := 0; i < v.Len(); i++ {
+					switch t.Elem().Kind() {
+					case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+						ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+					case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+						ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+					case reflect.Uint8, reflect.Uintptr:
+						ss = append(ss, formatHex(v.Index(i).Uint()))
+					case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+						ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+					}
+				}
+				s := strings.Join(ss, ", ")
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+	}
+
+	// Wrap the output with appropriate type information.
+	var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+	if !isMostlyText {
+		// The "{...}" byte-sequence literal is not valid Go syntax for strings.
+		// Emit the type for extra clarity (e.g. "string{...}").
+		if t.Kind() == reflect.String {
+			opts = opts.WithTypeMode(emitType)
+		}
+		return opts.FormatType(t, out)
+	}
+	switch t.Kind() {
+	case reflect.String:
+		out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+		if t != stringType {
+			out = opts.FormatType(t, out)
+		}
+	case reflect.Slice:
+		out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+		if t != bytesType {
+			out = opts.FormatType(t, out)
+		}
+	}
+	return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
+func formatASCII(s string) string {
+	b := bytes.Repeat([]byte{'.'}, len(s))
+	for i := 0; i < len(s); i++ {
+		if ' ' <= s[i] && s[i] <= '~' {
+			b[i] = s[i]
+		}
+	}
+	return string(b)
+}
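+
+// For illustration only (not part of the upstream source), every byte outside
+// the printable ASCII range is replaced by a period:
+//
+//	formatASCII("a\x00b\x7fc") // "a.b.c"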
+
+func (opts formatOptions) formatDiffSlice(
+	vx, vy reflect.Value, chunkSize int, name string,
+	makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+	eq := func(ix, iy int) bool {
+		return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+	}
+	es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+		return diff.BoolResult(eq(ix, iy))
+	})
+
+	appendChunks := func(v reflect.Value, d diffMode) int {
+		n0 := v.Len()
+		for v.Len() > 0 {
+			n := chunkSize
+			if n > v.Len() {
+				n = v.Len()
+			}
+			list = append(list, makeRec(v.Slice(0, n), d))
+			v = v.Slice(n, v.Len())
+		}
+		return n0 - v.Len()
+	}
+
+	var numDiffs int
+	maxLen := -1
+	if opts.LimitVerbosity {
+		maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+		opts.VerbosityLevel--
+	}
+
+	groups := coalesceAdjacentEdits(name, es)
+	groups = coalesceInterveningIdentical(groups, chunkSize/4)
+	groups = cleanupSurroundingIdentical(groups, eq)
+	maxGroup := diffStats{Name: name}
+	for i, ds := range groups {
+		if maxLen >= 0 && numDiffs >= maxLen {
+			maxGroup = maxGroup.Append(ds)
+			continue
+		}
+
+		// Print equal.
+		if ds.NumDiff() == 0 {
+			// Compute the number of leading and trailing equal bytes to print.
+			var numLo, numHi int
+			numEqual := ds.NumIgnored + ds.NumIdentical
+			for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+				numLo++
+			}
+			for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+				numHi++
+			}
+			if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+				numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+			}
+
+			// Print the equal bytes.
+			appendChunks(vx.Slice(0, numLo), diffIdentical)
+			if numEqual > numLo+numHi {
+				ds.NumIdentical -= numLo + numHi
+				list.AppendEllipsis(ds)
+			}
+			appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+			vx = vx.Slice(numEqual, vx.Len())
+			vy = vy.Slice(numEqual, vy.Len())
+			continue
+		}
+
+		// Print unequal.
+		len0 := len(list)
+		nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+		vx = vx.Slice(nx, vx.Len())
+		ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+		vy = vy.Slice(ny, vy.Len())
+		numDiffs += len(list) - len0
+	}
+	if maxGroup.IsZero() {
+		assert(vx.Len() == 0 && vy.Len() == 0)
+	} else {
+		list.AppendEllipsis(maxGroup)
+	}
+	return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+//	Input:  "..XXY...Y"
+//	Output: [
+//		{NumIdentical: 2},
+//		{NumRemoved: 2, NumInserted: 1},
+//		{NumIdentical: 3},
+//		{NumInserted: 1},
+//	]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+	var prevMode byte
+	lastStats := func(mode byte) *diffStats {
+		if prevMode != mode {
+			groups = append(groups, diffStats{Name: name})
+			prevMode = mode
+		}
+		return &groups[len(groups)-1]
+	}
+	for _, e := range es {
+		switch e {
+		case diff.Identity:
+			lastStats('=').NumIdentical++
+		case diff.UniqueX:
+			lastStats('!').NumRemoved++
+		case diff.UniqueY:
+			lastStats('!').NumInserted++
+		case diff.Modified:
+			lastStats('!').NumModified++
+		}
+	}
+	return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+//	WindowSize: 16,
+//	Input: [
+//		{NumIdentical: 61},              // group 0
+//		{NumRemoved: 3, NumInserted: 1}, // group 1
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 9},                 // └── coalesce
+//		{NumIdentical: 64},              // group 2
+//		{NumRemoved: 3, NumInserted: 1}, // group 3
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 7},                 // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 2},                 // └── coalesce
+//		{NumIdentical: 63},              // group 4
+//	]
+//	Output: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 64},
+//		{NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 63},
+//	]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+	groups, groupsOrig := groups[:0], groups
+	for i, ds := range groupsOrig {
+		if len(groups) >= 2 && ds.NumDiff() > 0 {
+			prev := &groups[len(groups)-2] // Unequal group
+			curr := &groups[len(groups)-1] // Equal group
+			next := &groupsOrig[i]         // Unequal group
+			hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+			hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+			if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+				*prev = prev.Append(*curr).Append(*next)
+				groups = groups[:len(groups)-1] // Truncate off equal group
+				continue
+			}
+		}
+		groups = append(groups, ds)
+	}
+	return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements becomes possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+//	Input: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+//		{NumIdentical: 67},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3},  // assume 10 trailing identical elements
+//		{NumIdentical: 54},
+//	]
+//	Output: [
+//		{NumIdentical: 64}, // incremented by 3
+//		{NumRemoved: 9},
+//		{NumIdentical: 67},
+//		{NumRemoved: 9},
+//		{NumIdentical: 64}, // incremented by 10
+//	]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+	var ix, iy int // indexes into sequence x and y
+	for i, ds := range groups {
+		// Handle equal group.
+		if ds.NumDiff() == 0 {
+			ix += ds.NumIdentical
+			iy += ds.NumIdentical
+			continue
+		}
+
+		// Handle unequal group.
+		nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+		ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+		var numLeadingIdentical, numTrailingIdentical int
+		for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+			numLeadingIdentical++
+		}
+		for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+			numTrailingIdentical++
+		}
+		if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+			if numLeadingIdentical > 0 {
+				// Remove leading identical span from this group and
+				// insert it into the preceding group.
+				if i-1 >= 0 {
+					groups[i-1].NumIdentical += numLeadingIdentical
+				} else {
+					// No preceding group exists, so prepend a new group,
+					// but do so after we finish iterating over all groups.
+					defer func() {
+						groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+					}()
+				}
+				// Increment indexes since the preceding group would have handled this.
+				ix += numLeadingIdentical
+				iy += numLeadingIdentical
+			}
+			if numTrailingIdentical > 0 {
+				// Remove trailing identical span from this group and
+				// insert it into the succeeding group.
+				if i+1 < len(groups) {
+					groups[i+1].NumIdentical += numTrailingIdentical
+				} else {
+					// No succeeding group exists, so append a new group,
+					// but do so after we finish iterating over all groups.
+					defer func() {
+						groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+					}()
+				}
+				// Do not increment indexes since the succeeding group will handle this.
+			}
+
+			// Update this group since some identical elements were removed.
+			nx -= numIdentical
+			ny -= numIdentical
+			groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+		}
+		ix += nx
+		iy += ny
+	}
+	return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 0000000..388fcf5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+	// The output of Diff is documented as being unstable to provide future
+	// flexibility in changing the output for more humanly readable reports.
+	// This logic intentionally introduces instability to the exact output
+	// so that users can detect accidental reliance on stability early on,
+	// rather than much later when an actual change to the format occurs.
+	if flags.Deterministic || randBool {
+		// Use regular spaces (U+0020).
+		switch d {
+		case diffUnknown, diffIdentical:
+			b = append(b, "  "...)
+		case diffRemoved:
+			b = append(b, "- "...)
+		case diffInserted:
+			b = append(b, "+ "...)
+		}
+	} else {
+		// Use non-breaking spaces (U+00a0).
+		switch d {
+		case diffUnknown, diffIdentical:
+			b = append(b, "  "...)
+		case diffRemoved:
+			b = append(b, "- "...)
+		case diffInserted:
+			b = append(b, "+ "...)
+		}
+	}
+	return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+	for ; n > 0; n-- {
+		b = append(b, c)
+	}
+	return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+	// Len reports the length in bytes of a single-line version of the tree.
+	// Nested textRecord.Diff and textRecord.Comment fields are ignored.
+	Len() int
+	// Equal reports whether the two trees are structurally identical.
+	// Nested textRecord.Diff and textRecord.Comment fields are compared.
+	Equal(textNode) bool
+	// String returns the string representation of the text tree.
+	// It is not guaranteed that len(x.String()) == x.Len(),
+	// nor that x.String() == y.String() implies that x.Equal(y).
+	String() string
+
+	// formatCompactTo formats the contents of the tree as a single-line string
+	// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+	// fields are ignored.
+	//
+	// However, not all nodes in the tree should be collapsed as a single-line.
+	// If a node can be collapsed as a single-line, it is replaced by a textLine
+	// node. Since the top-level node cannot replace itself, this also returns
+	// the current node itself.
+	//
+	// This does not mutate the receiver.
+	formatCompactTo([]byte, diffMode) ([]byte, textNode)
+	// formatExpandedTo formats the contents of the tree as a multi-line string
+	// to the provided buffer. In order for column alignment to operate well,
+	// formatCompactTo must be called before calling formatExpandedTo.
+	formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+	Prefix   string      // e.g., "bytes.Buffer{"
+	Value    textNode    // textWrap | textList | textLine
+	Suffix   string      // e.g., "}"
+	Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+	return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+	if s2, ok := s2.(*textWrap); ok {
+		return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+	}
+	return false
+}
+func (s *textWrap) String() string {
+	var d diffMode
+	var n indentMode
+	_, s2 := s.formatCompactTo(nil, d)
+	b := n.appendIndent(nil, d)      // Leading indent
+	b = s2.formatExpandedTo(b, d, n) // Main body
+	b = append(b, '\n')              // Trailing newline
+	return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+	n0 := len(b) // Original buffer length
+	b = append(b, s.Prefix...)
+	b, s.Value = s.Value.formatCompactTo(b, d)
+	b = append(b, s.Suffix...)
+	if _, ok := s.Value.(textLine); ok {
+		return b, textLine(b[n0:])
+	}
+	return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+	b = append(b, s.Prefix...)
+	b = s.Value.formatExpandedTo(b, d, n)
+	b = append(b, s.Suffix...)
+	return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+	Diff       diffMode     // e.g., 0 or '-' or '+'
+	Key        string       // e.g., "MyField"
+	Value      textNode     // textWrap | textLine
+	ElideComma bool         // avoid trailing comma
+	Comment    fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+	hasStats := !ds.IsZero()
+	if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+		if hasStats {
+			*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+		} else {
+			*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+		}
+		return
+	}
+	if hasStats {
+		(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+	}
+}
+
+func (s textList) Len() (n int) {
+	for i, r := range s {
+		n += len(r.Key)
+		if r.Key != "" {
+			n += len(": ")
+		}
+		n += r.Value.Len()
+		if i < len(s)-1 {
+			n += len(", ")
+		}
+	}
+	return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+	if s2, ok := s2.(textList); ok {
+		if len(s1) != len(s2) {
+			return false
+		}
+		for i := range s1 {
+			r1, r2 := s1[i], s2[i]
+			if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func (s textList) String() string {
+	return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+	s = append(textList(nil), s...) // Avoid mutating original
+
+	// Determine whether we can collapse this list as a single line.
+	n0 := len(b) // Original buffer length
+	var multiLine bool
+	for i, r := range s {
+		if r.Diff == diffInserted || r.Diff == diffRemoved {
+			multiLine = true
+		}
+		b = append(b, r.Key...)
+		if r.Key != "" {
+			b = append(b, ": "...)
+		}
+		b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+		if _, ok := s[i].Value.(textLine); !ok {
+			multiLine = true
+		}
+		if r.Comment != nil {
+			multiLine = true
+		}
+		if i < len(s)-1 {
+			b = append(b, ", "...)
+		}
+	}
+	// Force multi-lined output when printing a removed/inserted node that
+	// is sufficiently long.
+	if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+		multiLine = true
+	}
+	if !multiLine {
+		return b, textLine(b[n0:])
+	}
+	return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+	alignKeyLens := s.alignLens(
+		func(r textRecord) bool {
+			_, isLine := r.Value.(textLine)
+			return r.Key == "" || !isLine
+		},
+		func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+	)
+	alignValueLens := s.alignLens(
+		func(r textRecord) bool {
+			_, isLine := r.Value.(textLine)
+			return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+		},
+		func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+	)
+
+	// Format lists of simple lists in a batched form.
+	// If the list is a sequence of only textLine values,
+	// then batch multiple values on a single line.
+	var isSimple bool
+	for _, r := range s {
+		_, isLine := r.Value.(textLine)
+		isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+		if !isSimple {
+			break
+		}
+	}
+	if isSimple {
+		n++
+		var batch []byte
+		emitBatch := func() {
+			if len(batch) > 0 {
+				b = n.appendIndent(append(b, '\n'), d)
+				b = append(b, bytes.TrimRight(batch, " ")...)
+				batch = batch[:0]
+			}
+		}
+		for _, r := range s {
+			line := r.Value.(textLine)
+			if len(batch)+len(line)+len(", ") > maxColumnLength {
+				emitBatch()
+			}
+			batch = append(batch, line...)
+			batch = append(batch, ", "...)
+		}
+		emitBatch()
+		n--
+		return n.appendIndent(append(b, '\n'), d)
+	}
+
+	// Format the list as a multi-lined output.
+	n++
+	for i, r := range s {
+		b = n.appendIndent(append(b, '\n'), d|r.Diff)
+		if r.Key != "" {
+			b = append(b, r.Key+": "...)
+		}
+		b = alignKeyLens[i].appendChar(b, ' ')
+
+		b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+		if !r.ElideComma {
+			b = append(b, ',')
+		}
+		b = alignValueLens[i].appendChar(b, ' ')
+
+		if r.Comment != nil {
+			b = append(b, " // "+r.Comment.String()...)
+		}
+	}
+	n--
+
+	return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+	skipFunc func(textRecord) bool,
+	lenFunc func(textRecord) int,
+) []repeatCount {
+	var startIdx, endIdx, maxLen int
+	lens := make([]repeatCount, len(s))
+	for i, r := range s {
+		if skipFunc(r) {
+			for j := startIdx; j < endIdx && j < len(s); j++ {
+				lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+			}
+			startIdx, endIdx, maxLen = i+1, i+1, 0
+		} else {
+			if maxLen < lenFunc(r) {
+				maxLen = lenFunc(r)
+			}
+			endIdx = i + 1
+		}
+	}
+	for j := startIdx; j < endIdx && j < len(s); j++ {
+		lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+	}
+	return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+	textNil      = textLine("nil")
+	textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+	return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+	if s2, ok := s2.(textLine); ok {
+		return bytes.Equal([]byte(s1), []byte(s2))
+	}
+	return false
+}
+func (s textLine) String() string {
+	return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+	return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+	return append(b, s...)
+}
+
+type diffStats struct {
+	Name         string
+	NumIgnored   int
+	NumIdentical int
+	NumRemoved   int
+	NumInserted  int
+	NumModified  int
+}
+
+func (s diffStats) IsZero() bool {
+	s.Name = ""
+	return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+	return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+	assert(s.Name == ds.Name)
+	s.NumIgnored += ds.NumIgnored
+	s.NumIdentical += ds.NumIdentical
+	s.NumRemoved += ds.NumRemoved
+	s.NumInserted += ds.NumInserted
+	s.NumModified += ds.NumModified
+	return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+//
+//	diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+	var ss []string
+	var sum int
+	labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+	counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+	for i, n := range counts {
+		if n > 0 {
+			ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+		}
+		sum += n
+	}
+
+	// Pluralize the name (adjusting for some obscure English grammar rules).
+	name := s.Name
+	if sum > 1 {
+		name += "s"
+		if strings.HasSuffix(name, "ys") {
+			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+		}
+	}
+
+	// Format the list according to English grammar (with Oxford comma).
+	switch n := len(ss); n {
+	case 0:
+		return ""
+	case 1, 2:
+		return strings.Join(ss, " and ") + " " + name
+	default:
+		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+	}
+}
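+
+// For illustration only (not part of the upstream source), the pluralization
+// above also covers the -y/-ies case:
+//
+//	diffStats{Name: "entry", NumRemoved: 2}.String() => "2 removed entries"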
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 0000000..668d470
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+	parent *valueNode
+
+	Type   reflect.Type
+	ValueX reflect.Value
+	ValueY reflect.Value
+
+	// NumSame is the number of leaf nodes that are equal.
+	// All descendants are equal only if NumDiff is 0.
+	NumSame int
+	// NumDiff is the number of leaf nodes that are not equal.
+	NumDiff int
+	// NumIgnored is the number of leaf nodes that are ignored.
+	NumIgnored int
+	// NumCompared is the number of leaf nodes that were compared
+	// using an Equal method or Comparer function.
+	NumCompared int
+	// NumTransformed is the number of non-leaf nodes that were transformed.
+	NumTransformed int
+	// NumChildren is the number of transitive descendants of this node.
+	// This counts from zero; thus, leaf nodes have no descendants.
+	NumChildren int
+	// MaxDepth is the maximum depth of the tree. This counts from zero;
+	// thus, leaf nodes have a depth of zero.
+	MaxDepth int
+
+	// Records is a list of struct fields, slice elements, or map entries.
+	Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirection, or
+	// type assertion.
+	Value *valueNode // If populated, implies Records is not populated
+
+	// TransformerName is the name of the transformer.
+	TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+	Key   reflect.Value // Invalid for slice element
+	Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+	vx, vy := ps.Values()
+	child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+	switch s := ps.(type) {
+	case StructField:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+	case SliceIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Value: child})
+	case MapIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+	case Indirect:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case TypeAssertion:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case Transform:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+		parent.TransformerName = s.Name()
+		parent.NumTransformed++
+	default:
+		assert(parent == nil) // Must be the root step
+	}
+	return child
+}
+
+func (r *valueNode) Report(rs Result) {
+	assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+	if rs.ByIgnore() {
+		r.NumIgnored++
+	} else {
+		if rs.Equal() {
+			r.NumSame++
+		} else {
+			r.NumDiff++
+		}
+	}
+	assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+	if rs.ByMethod() {
+		r.NumCompared++
+	}
+	if rs.ByFunc() {
+		r.NumCompared++
+	}
+	assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+	if child.parent == nil {
+		return nil
+	}
+	parent = child.parent
+	parent.NumSame += child.NumSame
+	parent.NumDiff += child.NumDiff
+	parent.NumIgnored += child.NumIgnored
+	parent.NumCompared += child.NumCompared
+	parent.NumTransformed += child.NumTransformed
+	parent.NumChildren += child.NumChildren + 1
+	if parent.MaxDepth < child.MaxDepth+1 {
+		parent.MaxDepth = child.MaxDepth + 1
+	}
+	return parent
+}
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.4.3
-  - 1.5.3
-  - tip
-
-script:
-  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09..a502fdc 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
 
 We definitely welcome patches and contribution to this project!
 
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
 ### Legal requirements
 
 In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46..3e9a618 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
 The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
 and DCE 1.1: Authentication and Security Services. 
 
 This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@
 change is the ability to represent an invalid UUID (vs a NIL UUID).
 
 ###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
 
 ###### Documentation 
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
 
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here: 
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4b..dc60082 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@
 	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
 	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
 	Nil           UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+	Max = UUID{
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	}
 )
 
 // NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
 package uuid
 
 // getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
 // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
 func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06c..c351129 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@
 }
 
 // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid.  The time is only defined for version 1 and 2 UUIDs.
+// uuid.  The time is only defined for version 1, 2, 6 and 7 UUIDs.
 func (uuid UUID) Time() Time {
-	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
-	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
-	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
-	return Time(time)
+	var t Time
+	switch uuid.Version() {
+	case 6:
+		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+		t = Time(time)
+	case 7:
+		time := binary.BigEndian.Uint64(uuid[:8])
+		t = Time((time>>16)*10000 + g1582ns100)
+	default: // forward compatible
+		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+		t = Time(time)
+	}
+	return t
 }
 
 // ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207a..5232b48 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@
 	return ok
 }
 
-// Parse decodes s into a UUID or returns an error.  Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed.  Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded.  In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g.  {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}.  Only the middle 36 bytes are
+// examined in the latter case.  Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
 func Parse(s string) (UUID, error) {
 	var uuid UUID
 	switch len(s) {
@@ -69,7 +73,7 @@
 
 	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9:
-		if strings.ToLower(s[:9]) != "urn:uuid:" {
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
 		}
 		s = s[9:]
@@ -101,7 +105,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(s[x], s[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@
 	switch len(b) {
 	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+		if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
 		}
 		b = b[9:]
@@ -145,7 +150,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(b[x], b[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@
 	return uuid
 }
 
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return fmt.Errorf("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
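+
+// For illustration only (not part of the upstream source), Validate checks
+// the format without allocating a UUID value:
+//
+//	Validate("f47ac10b-58cc-0372-8567-0e02b2c3d479") // nil
+//	Validate("not-a-uuid")                           // invalid UUID length error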
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
@@ -292,3 +351,15 @@
 	poolMu.Lock()
 	poolPos = randPoolSize
 }
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+	var uuidStrs = make([]string, len(uuids))
+	for i, uuid := range uuids {
+		uuidStrs[i] = uuid.String()
+	}
+	return uuidStrs
+}
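+
+// For illustration only (not part of the upstream source):
+//
+//	ids := UUIDs{Must(NewRandom()), Must(NewRandom())}
+//	strs := ids.Strings() // []string with the canonical form of each UUID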
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 sets the NodeID to random bits automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
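+
+// For illustration only (not part of the upstream source):
+//
+//	u, err := NewV6()
+//	if err == nil {
+//		_ = u.Time() // v6 preserves the Gregorian timestamp, reordered for sortability
+//	}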
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
+// It also has improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (Variant is 10).
+// See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned, stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
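+
+// For illustration only (not part of the upstream source), UUIDs generated by
+// successive NewV7 calls in one process sort in creation order, because
+// getV7Time never returns a (milli, seq) pair less than or equal to the
+// previous one:
+//
+//	a, _ := NewV7()
+//	b, _ := NewV7()
+//	// a.String() < b.String()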